From 357c3c4c6cd60c9a53e822ae4370b952f7f69dd6 Mon Sep 17 00:00:00 2001 From: Brett Date: Wed, 18 Jul 2018 06:12:10 -0400 Subject: [PATCH 01/90] Add material data parsing --- ttyd-tools/ttydview/ttydview.py | 262 ++++++++++++++++++++++++++++++++ 1 file changed, 262 insertions(+) create mode 100644 ttyd-tools/ttydview/ttydview.py diff --git a/ttyd-tools/ttydview/ttydview.py b/ttyd-tools/ttydview/ttydview.py new file mode 100644 index 00000000..1885908a --- /dev/null +++ b/ttyd-tools/ttydview/ttydview.py @@ -0,0 +1,262 @@ +""" +File: ttydview.py +Author: SolidifiedGaming aka Brett B. +Revision: 1.0 +Purpose: Create .obj file from map collision data for viewing +""" + +import os +import math +import numpy +import struct + +inputPath = os.path.join(os.path.dirname(__file__), "map_data") + "/" +inputFile = inputPath + input("Enter the name of the input file: ") +outputPath = os.path.join(os.path.dirname(__file__), "obj_files") + "/" +outputFile = outputPath + input("Enter the name of the output file: ") + ".obj" + +f = open(inputFile, "rb") +binaryData = f.read() +f.close() + +class Vector3f(object): + def __init__(self, address, x = 0.0, y = 0.0, z = 0.0): + if address == None: + self.x = float(x) + self.y = float(y) + self.z = float(z) + else: + self.x = struct.unpack_from(">f", binaryData, address)[0] + self.y = struct.unpack_from(">f", binaryData, address + 0x4)[0] + self.z = struct.unpack_from(">f", binaryData, address + 0x8)[0] + +class Box(object): + def __init__(self, maximum, minimum): + self.max = maximum + self.min = minimum + +class Mesh(object): + def __init__(self, parentClass, address): + self.parentClass = parentClass + self.unk_00 = struct.unpack_from(">L", binaryData, address)[0] + self.polygonCount = struct.unpack_from(">L", binaryData, address + 0x4)[0] + self.elementMask = struct.unpack_from(">L", binaryData, address + 0x8)[0] + self.vcdTableOffset = struct.unpack_from(">L", binaryData, address + 0xC)[0] + self.polygonInfo = [] + + for i in range(self.polygonCount): + self.polygonInfo.append(PolygonInfo(self, address + 0x10 + (0x8*i))) + +class MeshDescriptor(object): + def __init__(self, address): + self.materialOffset = struct.unpack_from(">L", binaryData, address)[0] + self.meshOffset = struct.unpack_from(">L", binaryData, address + 0x4)[0] + +class PolygonInfo(object): + def __init__(self, parentClass, address): + self.parentClass = parentClass + self.offset = struct.unpack_from(">L", binaryData, address)[0] + self.size = struct.unpack_from(">L", binaryData, address + 0x4)[0] + self.data = Polygon(self, 0x20 + self.offset) + +class Polygon(object): + def __init__(self, parentClass, address): + self.parentClass = parentClass + self.unk_00 = struct.unpack_from(">H", binaryData, address)[0] + self.vertexCount = struct.unpack_from(">B", binaryData, address + 0x2)[0] + self.vertices = [] + + vertexSize = 0x2*bin(parentClass.parentClass.elementMask).count("1") + + for i in range(self.vertexCount): + self.vertices.append(Vertex(self, address + 0x3 + (vertexSize*i))) + +class Vertex(object): + def __init__(self, parentClass, address): + self.parentClass = parentClass + + if (parentClass.parentClass.parentClass.elementMask & 0x001): + self.positionIndex = struct.unpack_from(">H", binaryData, address)[0] + + if (parentClass.parentClass.parentClass.elementMask & 0x002): + self.normalIndex = struct.unpack_from(">H", binaryData, address + 0x2)[0] + + if (parentClass.parentClass.parentClass.elementMask & 0x004): + self.colorIndex0 = struct.unpack_from(">H", binaryData, address + 
0x4)[0] + + if (parentClass.parentClass.parentClass.elementMask & 0x008): + self.colorIndex1 = struct.unpack_from(">H", binaryData, address + 0x6)[0] + + if (parentClass.parentClass.parentClass.elementMask & 0x010): + self.textureCoordinateIndex0 = struct.unpack_from(">H", binaryData, address + 0x8)[0] + + if (parentClass.parentClass.parentClass.elementMask & 0x020): + self.textureCoordinateIndex1 = struct.unpack_from(">H", binaryData, address + 0xA)[0] + + if (parentClass.parentClass.parentClass.elementMask & 0x040): + self.textureCoordinateIndex2 = struct.unpack_from(">H", binaryData, address + 0xC)[0] + + if (parentClass.parentClass.parentClass.elementMask & 0x080): + self.textureCoordinateIndex3 = struct.unpack_from(">H", binaryData, address + 0xE)[0] + + if (parentClass.parentClass.parentClass.elementMask & 0x100): + self.textureCoordinateIndex4 = struct.unpack_from(">H", binaryData, address + 0x10)[0] + + if (parentClass.parentClass.parentClass.elementMask & 0x200): + self.textureCoordinateIndex5 = struct.unpack_from(">H", binaryData, address + 0x12)[0] + + if (parentClass.parentClass.parentClass.elementMask & 0x400): + self.textureCoordinateIndex6 = struct.unpack_from(">H", binaryData, address + 0x14)[0] + + if (parentClass.parentClass.parentClass.elementMask & 0x800): + self.textureCoordinateIndex7 = struct.unpack_from(">H", binaryData, address + 0x16)[0] + +class Node(object): + def __init__(self, address): + self.nameOffset = struct.unpack_from(">L", binaryData, address)[0] + self.name = struct.unpack_from(">{}s".format(getStringLength(0x20 + self.nameOffset)), binaryData, 0x20 + self.nameOffset)[0].decode("utf-8") + self.typeOffset = struct.unpack_from(">L", binaryData, address + 0x4)[0] + self.type = struct.unpack_from(">{}s".format(getStringLength(0x20 + self.typeOffset)), binaryData, 0x20 + self.typeOffset)[0].decode("utf-8") + self.parentOffset = struct.unpack_from(">L", binaryData, address + 0x8)[0] + self.childOffset = struct.unpack_from(">L", binaryData, address + 0xC)[0] + self.nextOffset = struct.unpack_from(">L", binaryData, address + 0x10)[0] + self.prevOffset = struct.unpack_from(">L", binaryData, address + 0x14)[0] + self.scale = Vector3f(address + 0x18) + self.rotation = Vector3f(address + 0x24) + self.translation = Vector3f(address + 0x30) + self.bbox = Box(Vector3f(address + 0x3C), Vector3f(address + 0x48)) + self.unk_54 = struct.unpack_from(">L", binaryData, address + 0x54)[0] + self.unkOffset = struct.unpack_from(">L", binaryData, address + 0x58)[0] + self.meshCount = struct.unpack_from(">L", binaryData, address + 0x5C)[0] + self.parentNode = None + self.children = [] + + self.meshDescriptors = [] + self.meshes = [] + + if self.meshCount > 0: + for i in range(self.meshCount): + self.meshDescriptors.append(MeshDescriptor(address + 0x60 + (0x8*i))) + + for j in range(self.meshCount): + self.meshes.append(Mesh(self, 0x20 + self.meshDescriptors[j].meshOffset)) + + def add_child(self, address): + childNode = Node(address) + self.children.append(childNode) + childNode.parentNode = self + +class VCDTable(object): + def __init__(self, address): + self.positionOffset = struct.unpack_from(">L", binaryData, address)[0] + self.normalOffset = struct.unpack_from(">L", binaryData, address + 0x4)[0] + self.unk08 = struct.unpack_from(">L", binaryData, address + 0x8)[0] + self.colorOffset0 = struct.unpack_from(">L", binaryData, address + 0xC)[0] + self.colorOffset1 = struct.unpack_from(">L", binaryData, address + 0x10)[0] + self.unk_14 = struct.unpack_from(">L", binaryData, 
address + 0x14)[0] + self.textureCoordinateOffsets = [] + self.unk_38 = struct.unpack_from(">L", binaryData, address + 0x38)[0] + self.unk_3C = struct.unpack_from(">L", binaryData, address + 0x3C)[0] + self.unk_40 = struct.unpack_from(">L", binaryData, address + 0x40)[0] + self.positionQuantizationShift = struct.unpack_from(">L", binaryData, address + 0x44)[0] + self.textureCoordinateQuantizationShifts = [] + + for i in range(8): + self.textureCoordinateOffsets.append(struct.unpack_from(">L", binaryData, address + 0x18 + (0x4*i))[0]) + self.textureCoordinateQuantizationShifts.append(struct.unpack_from(">L", binaryData, address + 0x44 + (0x4*i))[0]) + + positionCount = struct.unpack_from(">L", binaryData, 0x20 + self.positionOffset)[0] + self.positions = [] + + for i in range(positionCount): + x = rawToFloat(0x24 + self.positionOffset + (0x6*i), self.positionQuantizationShift) + y = rawToFloat(0x26 + self.positionOffset + (0x6*i), self.positionQuantizationShift) + z = rawToFloat(0x28 + self.positionOffset + (0x6*i), self.positionQuantizationShift) + self.positions.append(Vector3f(None, x, y, z)) + +def rawToFloat(address, scale): + num = struct.unpack_from(">h", binaryData, address)[0] + return '{:.6f}'.format(num / (2**scale)) + +def getStringLength(address): + charCount = 0 + curChar = struct.unpack_from(">b", binaryData, address)[0] + while curChar: + charCount += 1 + curChar = struct.unpack_from(">b", binaryData, address + charCount)[0] + return charCount + +def fillScene(curNode): + if curNode.nextOffset: + curNode.parentNode.add_child(0x20 + curNode.nextOffset) + fillScene(curNode.parentNode.children[-1]) + if curNode.childOffset: + curNode.add_child(0x20 + curNode.childOffset) + fillScene(curNode.children[-1]) + +sceneGraphRootOffset = struct.unpack_from(">L", binaryData, 0x24)[0] +curAddress = 0x20 + sceneGraphRootOffset + +rootNode = Node(curAddress) +fillScene(rootNode) + + +def getTransformationMatrix(curNode): + radX = math.radians(curNode.rotation.x) + radY = math.radians(curNode.rotation.y) + radZ = math.radians(curNode.rotation.z) + + scaleMatrix = numpy.array([[curNode.scale.x, 0, 0, 0], [0, curNode.scale.y, 0, 0], [0, 0, curNode.scale.z, 0], [0, 0, 0, 1]]) + rotationMatX = numpy.array([[1, 0, 0, 0], [0, math.cos(radX), -math.sin(radX), 0], [0, math.sin(radX), math.cos(radX), 0], [0, 0, 0, 1]]) + rotationMatY = numpy.array([[math.cos(radY), 0, math.sin(radY), 0], [0, 1, 0, 0], [-math.sin(radY), 0, math.cos(radY), 0], [0, 0, 0, 1]]) + rotationMatZ = numpy.array([[math.cos(radZ), -math.sin(radZ), 0, 0], [math.sin(radZ), math.cos(radZ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) + translationMatrix = numpy.array([[1, 0, 0, curNode.translation.x], [0, 1, 0, curNode.translation.y], [0, 0, 1, curNode.translation.z], [0, 0, 0, 1]]) + + return translationMatrix @ (rotationMatZ @ rotationMatY @ rotationMatX) @ scaleMatrix + +def vectorToNumpy(vector): + return numpy.array([(vector.x), (vector.y), (vector.z), 1]) + +def DFS(curNode): + global vcdTable + matrixStack.append(matrixStack[-1] @ getTransformationMatrix(curNode)) + for node in curNode.children: + DFS(node) + if curNode.meshCount: + writeGroup(curNode) + for mesh in curNode.meshes: + if not vertexCount: + vcdTable = VCDTable(0x20 + mesh.vcdTableOffset) + for polyInfo in mesh.polygonInfo: + for vertex in polyInfo.data.vertices: + absolutePos = matrixStack[-1] @ vectorToNumpy(vcdTable.positions[vertex.positionIndex]) + writeVertex(absolutePos) + writeFace(len(polyInfo.data.vertices)) + matrixStack.pop() + +def writeVertex(vertex): + 
global vertexCount + vertexCount += 1 + out.write("v " + str('{:.6f}'.format(vertex[0])) + " " + str('{:.6f}'.format(vertex[1])) + " " + str('{:.6f}'.format(vertex[2])) + "\n") + +def writeFace(vCount): + global curFaceStart + for v in range(curFaceStart, curFaceStart + vCount-2, 1): + out.write("f " + str(v+1) + " " + str(v+2) + " " + str(v+3) + "\n") + curFaceStart = vertexCount + +def writeGroup(node): + out.write("g " + node.name + "\n") + +vcdTable = None + +matrixStack = [] +identityMatrix = numpy.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) +matrixStack.append(identityMatrix) + +curFaceStart = 0 +vertexCount = 0 + +out = open(outputFile, "w") +DFS(rootNode) +out.close() \ No newline at end of file From f659124ae8f87ffb0273b3f6322dce80ad478505 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Wed, 19 Sep 2018 15:31:23 +0200 Subject: [PATCH 02/90] rellink: Update to Python 3 --- ttyd-tools/rellink/rellink.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/ttyd-tools/rellink/rellink.py b/ttyd-tools/rellink/rellink.py index 068ba363..ddbd7d1d 100644 --- a/ttyd-tools/rellink/rellink.py +++ b/ttyd-tools/rellink/rellink.py @@ -30,7 +30,7 @@ sections = [] # [list of [offset, size]] imports = [] # [list of [id, offset]] -for i in xrange(section_count): +for i in range(section_count): curOffset = section_info_offset + 8 * i offset = (struct.unpack(">L", file_data[curOffset:curOffset+0x4])[0]) & ~1 # remove bit 0 (exec bit) @@ -38,11 +38,11 @@ sections.append([offset, size]) -print str(section_count) + " sections" +print(str(section_count) + " sections") -print sections +print(sections) -for i in xrange(import_size / 8): +for i in range(import_size // 8): curOffset = import_offset + 8 * i id = struct.unpack(">L", file_data[curOffset:curOffset+0x4])[0] @@ -50,9 +50,9 @@ imports.append([id, offset]) -print str(import_size / 8) + " import lists" +print(str(import_size / 8) + " import lists") -print imports +print(imports) for import_entry in imports: @@ -67,7 +67,7 @@ addend = struct.unpack(">L", file_data[curOffset+0x4:curOffset+0x8])[0] curOffset += 8 - print "Processing import entry: " + format(curRelOffset, "x") + " / " + format(operation, "x") + " / " + format(targetSection, "x") + " / " + format(addend, "x") + print("Processing import entry: " + format(curRelOffset, "x") + " / " + format(operation, "x") + " / " + format(targetSection, "x") + " / " + format(addend, "x")) effectiveOffset = sections[curRelSection][0] + curRelOffset if relID == import_entry[0]: @@ -75,7 +75,7 @@ else: targetAddress = addend - print format(effectiveOffset, "x") + " / " + format(targetAddress, "x") + print(format(effectiveOffset, "x") + " / " + format(targetAddress, "x")) #if operation == 0 or operation == 201: # R_PPC_NONE || R_DOLPHIN_NOP # dummy = 0 @@ -101,9 +101,9 @@ elif operation == 203: # R_DOLPHIN_END break else: - print "Unknown relocation operation " + format(opcode, "x") + print("Unknown relocation operation " + format(operation, "x")) -output_data = str(bytearray(file_data)) +output_data = bytearray(file_data) filename_out = filename_in + ".linked" output = open(filename_out , "wb") From 7fb498fd145d97e035ba8ed029dcb518e360b5e2 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Wed, 19 Sep 2018 15:32:11 +0200 Subject: [PATCH 03/90] ttydasm: Add compile-time support for SPM branch --- ttyd-tools/ttydasm/ttydasm.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ttyd-tools/ttydasm/ttydasm.cpp b/ttyd-tools/ttydasm/ttydasm.cpp index 
ec3662d2..441c6886 100644 --- a/ttyd-tools/ttydasm/ttydasm.cpp +++ b/ttyd-tools/ttydasm/ttydasm.cpp @@ -185,6 +185,9 @@ enum ScriptOpcode OP_MemOpReadFloat3, OP_MemOpReadFloat4, OP_MemOpReadFloatIndexed, +#ifdef GAME_SPM + OP_ClampInt, +#endif OP_SetUserWordBase, OP_SetUserFlagBase, OP_AllocateUserWordBase, @@ -547,6 +550,9 @@ std::string disassembleOpcode(uint32_t &address, std::string &indent, bool *done PASSTHROUGH(OP_MemOpReadFloat3, "mo_read_float3"); PASSTHROUGH(OP_MemOpReadFloat4, "mo_read_float4"); PASSTHROUGH(OP_MemOpReadFloatIndexed,"mo_read_float_indexed"); +#ifdef GAME_SPM + PASSTHROUGH(OP_ClampInt, "clampi"); +#endif PASSTHROUGH(OP_SetUserWordBase, "set_uw_base"); PASSTHROUGH(OP_SetUserFlagBase, "set_uf_base"); PASSTHROUGH(OP_AllocateUserWordBase,"alloc_uw"); From 2837453114465469a03e22f8c2d6e7c8c78cc666 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Wed, 19 Sep 2018 20:36:00 +0200 Subject: [PATCH 04/90] rel: Add driver for GameCube controller port keyboard --- ttyd-tools/rel/include/keyboard.h | 212 +++++++++++++++++++++++++++++ ttyd-tools/rel/include/mod.h | 3 + ttyd-tools/rel/include/ttyd.eu.lst | 35 ++++- ttyd-tools/rel/include/ttyd.jp.lst | 35 ++++- ttyd-tools/rel/include/ttyd.us.lst | 33 +++++ ttyd-tools/rel/include/ttyd/si.h | 38 ++++++ ttyd-tools/rel/source/keyboard.cpp | 127 +++++++++++++++++ ttyd-tools/rel/source/mod.cpp | 40 ++++-- 8 files changed, 513 insertions(+), 10 deletions(-) create mode 100644 ttyd-tools/rel/include/keyboard.h create mode 100644 ttyd-tools/rel/include/ttyd/si.h create mode 100644 ttyd-tools/rel/source/keyboard.cpp diff --git a/ttyd-tools/rel/include/keyboard.h b/ttyd-tools/rel/include/keyboard.h new file mode 100644 index 00000000..3062623c --- /dev/null +++ b/ttyd-tools/rel/include/keyboard.h @@ -0,0 +1,212 @@ +#pragma once + +#include + +#include + +#include + +namespace mod { + +enum class KeyCode : uint8_t +{ + kInvalid = 0x00, + + kHome = 0x06, + kEnd = 0x07, + kPageUp = 0x08, + kPageDown = 0x09, + kScrollLock = 0x0A, + + kA = 0x10, + kB = 0x11, + kC = 0x12, + kD = 0x13, + kE = 0x14, + kF = 0x15, + kG = 0x16, + kH = 0x17, + kI = 0x18, + kJ = 0x19, + kK = 0x1a, + kL = 0x1b, + kM = 0x1c, + kN = 0x1d, + kO = 0x1e, + kP = 0x1f, + kQ = 0x20, + kR = 0x21, + kS = 0x22, + kT = 0x23, + kU = 0x24, + kV = 0x25, + kW = 0x26, + kX = 0x27, + kY = 0x28, + kZ = 0x29, + k1 = 0x2a, + k2 = 0x2b, + k3 = 0x2c, + k4 = 0x2d, + k5 = 0x2e, + k6 = 0x2f, + k7 = 0x30, + k8 = 0x31, + k9 = 0x32, + k0 = 0x33, + kMinus = 0x34, + kPlus = 0x35, + kPrintScreen = 0x36, + kBracketOpen = 0x37, + kBracketClose = 0x38, + kColon = 0x39, + kQuote = 0x3a, + kHash = 0x3b, + kComma = 0x3c, + kPeriod = 0x3d, + kSlash = 0x3e, + kBackslash = 0x3f, + kF1 = 0x40, + kF2 = 0x41, + kF3 = 0x42, + kF4 = 0x43, + kF5 = 0x44, + kF6 = 0x45, + kF7 = 0x46, + kF8 = 0x47, + kF9 = 0x48, + kF10 = 0x49, + kF11 = 0x4a, + kF12 = 0x4b, + kEscape = 0x4c, + kInsert = 0x4d, + kDelete = 0x4e, + kTilde = 0x4f, + kBackspace = 0x50, + kTab = 0x51, + + kCapsLock = 0x53, + kLeftShift = 0x54, + kRightShift = 0x55, + kLeftControl = 0x56, + kRightAlt = 0x57, + kLeftWindows = 0x58, + kSpace = 0x59, + kRightWindows = 0x5a, + kMenu = 0x5b, + kLeftArrow = 0x5c, + kDownArrow = 0x5d, + kUpArrow = 0x5e, + kRightArrow = 0x5f, + kEnter = 0x61, +}; + +class Keyboard +{ +public: + Keyboard(int channel); + + void setChannel(int channel); + void update(); + + int getKeyDownCount() + { + return mKeysDownCount; + } + KeyCode getKeyDown(int index) + { + return mKeysDown[index]; + } + bool isKeyDown(KeyCode key) + { + for (int i = 
0; i < mKeysDownCount; ++i) + { + if (mKeysDown[i] == key) + { + return true; + } + } + return false; + } + + int getKeyPressedCount() + { + return mKeysPressedCount; + } + KeyCode getKeyPressed(int index) + { + return mKeysPressed[index]; + } + bool isKeyPressed(KeyCode key) + { + for (int i = 0; i < mKeysPressedCount; ++i) + { + if (mKeysPressed[i] == key) + { + return true; + } + } + return false; + } + + int getKeyReleasedCount() + { + return mKeysReleasedCount; + } + KeyCode getKeyReleased(int index) + { + return mKeysReleased[index]; + } + bool isKeyReleased(KeyCode key) + { + for (int i = 0; i < mKeysReleasedCount; ++i) + { + if (mKeysReleased[i] == key) + { + return true; + } + } + return false; + } + + static constexpr char getCharForKeycode(KeyCode code) + { + if (code >= KeyCode::kA && code <= KeyCode::kZ) + { + return static_cast('A' + static_cast(code) - static_cast(KeyCode::kA)); + } + else if (code >= KeyCode::k0 && code <= KeyCode::k9) + { + return static_cast('0' + static_cast(code) - static_cast(KeyCode::k0)); + } + else + { + return '\0'; + } + } + +private: + bool connect(); + void disconnect(); + +public: + const static int cMaxKeysPressed = 3; + +private: + int mKeysPrevCount = 0; + KeyCode mKeysPrev[cMaxKeysPressed]; + + int mKeysDownCount = 0; + KeyCode mKeysDown[cMaxKeysPressed]; + + int mKeysReleasedCount = 0; + KeyCode mKeysReleased[cMaxKeysPressed]; + + int mKeysPressedCount = 0; + KeyCode mKeysPressed[cMaxKeysPressed]; + + bool mConnected = false; + int mChannel = -1; +}; + +} \ No newline at end of file diff --git a/ttyd-tools/rel/include/mod.h b/ttyd-tools/rel/include/mod.h index 37f55375..b1f5ebf1 100644 --- a/ttyd-tools/rel/include/mod.h +++ b/ttyd-tools/rel/include/mod.h @@ -1,6 +1,7 @@ #pragma once #include "timer.h" +#include "keyboard.h" #include @@ -22,6 +23,8 @@ class Mod void (*mPFN_makeKey_trampoline)() = nullptr; char mDisplayBuffer[256]; + + Keyboard *mKeyboard = nullptr; }; } \ No newline at end of file diff --git a/ttyd-tools/rel/include/ttyd.eu.lst b/ttyd-tools/rel/include/ttyd.eu.lst index 7315dff4..dfea7089 100644 --- a/ttyd-tools/rel/include/ttyd.eu.lst +++ b/ttyd-tools/rel/include/ttyd.eu.lst @@ -1,3 +1,8 @@ +// __mem.c +800050b4:memset +//800050e4:__fill_mem +8000519c:memcpy + // system.o // unused:memcmp_as4 // unused:memset_as4 @@ -251,4 +256,32 @@ // 8026f204:double2hex // 8026f53c:longlong2str // 8026f850:long2str -// 8026faa8:parse_format \ No newline at end of file +// 8026faa8:parse_format + +// si.a +802c194c:SIBusy +802c196c:SIIsChanBusy +802c19a8:CompleteTransfer +802c1ca4:SIInterruptHandler +802c1fe8:SIEnablePollingInterrupt +802c2080:SIRegisterPollingHandler +802c214c:SIUnregisterPollingHandler +802c2240:SIInit +802c22f4:__SITransfer +802c2500:SIGetStatus +802c257c:SISetCommand +802c2590:SITransferCommands +802c25a0:SISetXY +802c260c:SIEnablePolling +802c26a8:SIDisablePolling +802c2714:SIGetResponseRaw +802c27e8:SIGetResponse +802c28ac:AlarmHandler +802c2938:SITransfer +802c2aa4:GetTypeCallback +802c2d3c:SIGetType +802c2f00:SIGetTypeAsync +802c303c:SIDecodeType +802c3188:SIProbe +802c31ac:SISetSamplingRate +802c3290:SIRefreshSamplingRate \ No newline at end of file diff --git a/ttyd-tools/rel/include/ttyd.jp.lst b/ttyd-tools/rel/include/ttyd.jp.lst index bc78a2cb..e9a45df8 100644 --- a/ttyd-tools/rel/include/ttyd.jp.lst +++ b/ttyd-tools/rel/include/ttyd.jp.lst @@ -1,3 +1,8 @@ +// __mem.c +800050b4:memset +//800050e4:__fill_mem +8000519c:memcpy + // system.o // unused:memcmp_as4 // unused:memset_as4 @@ -249,4 +254,32 @@ // 
80265460:double2hex // 80265798:longlong2str // 80265aac:long2str -// 80265d04:parse_format \ No newline at end of file +// 80265d04:parse_format + +// si.a +802b7acc:SIBusy +802b7aec:SIIsChanBusy +802b7b28:CompleteTransfer +802b7e24:SIInterruptHandler +802b8168:SIEnablePollingInterrupt +802b8200:SIRegisterPollingHandler +802b82cc:SIUnregisterPollingHandler +802b83c0:SIInit +802b8474:__SITransfer +802b8680:SIGetStatus +802b86fc:SISetCommand +802b8710:SITransferCommands +802b8720:SISetXY +802b878c:SIEnablePolling +802b8828:SIDisablePolling +802b8894:SIGetResponseRaw +802b8968:SIGetResponse +802b8a2c:AlarmHandler +802b8ab8:SITransfer +802b8c24:GetTypeCallback +802b8ebc:SIGetType +802b9080:SIGetTypeAsync +802b91bc:SIDecodeType +802b9308:SIProbe +802b932c:SISetSamplingRate +802b9410:SIRefreshSamplingRate \ No newline at end of file diff --git a/ttyd-tools/rel/include/ttyd.us.lst b/ttyd-tools/rel/include/ttyd.us.lst index ee192b65..37e1b8ac 100644 --- a/ttyd-tools/rel/include/ttyd.us.lst +++ b/ttyd-tools/rel/include/ttyd.us.lst @@ -1,3 +1,8 @@ +// __mem.c +800050b4:memset +//800050e4:__fill_mem +8000519c:memcpy + // system.o // unused:memcmp_as4 // unused:memset_as4 @@ -252,3 +257,31 @@ // 8026b748:longlong2str // 8026ba5c:long2str // 8026bcb4:parse_format + +// si.a +802bd7b8:SIBusy +802bd7d8:SIIsChanBusy +802bd814:CompleteTransfer +802bdb10:SIInterruptHandler +802bde54:SIEnablePollingInterrupt +802bdeec:SIRegisterPollingHandler +802bdfb8:SIUnregisterPollingHandler +802be0ac:SIInit +802be160:__SITransfer +802be36c:SIGetStatus +802be3e8:SISetCommand +802be3fc:SITransferCommands +802be40c:SISetXY +802be478:SIEnablePolling +802be514:SIDisablePolling +802be580:SIGetResponseRaw +802be654:SIGetResponse +802be718:AlarmHandler +802be7a4:SITransfer +802be910:GetTypeCallback +802beba8:SIGetType +802bed6c:SIGetTypeAsync +802beea8:SIDecodeType +802beff4:SIProbe +802bf018:SISetSamplingRate +802bf0fc:SIRefreshSamplingRate \ No newline at end of file diff --git a/ttyd-tools/rel/include/ttyd/si.h b/ttyd-tools/rel/include/ttyd/si.h new file mode 100644 index 00000000..71b97f91 --- /dev/null +++ b/ttyd-tools/rel/include/ttyd/si.h @@ -0,0 +1,38 @@ +#pragma once + +#include + +namespace ttyd::si { + +extern "C" { + +bool SIBusy(); +bool SIIsChanBusy(uint32_t channel); +// local: CompleteTransfer +// local: SIInterruptHandler +// local: SIEnablePollingInterrupt +bool SIRegisterPollingHandler(void *handler); +bool SIUnregisterPollingHandler(void *handler); +void SIInit(); +// local: __SITransfer +uint32_t SIGetStatus(uint32_t channel); +void SISetCommand(uint32_t channel, uint32_t command); +void SITransferCommands(); +uint32_t SISetXY(uint16_t lineInterval, uint8_t count); +uint32_t SIEnablePolling(uint32_t pollMask); +uint32_t SIDisablePolling(uint32_t pollMask); +// local: SIGetResponseRaw +uint32_t SIGetResponse(uint32_t channel, void *buffer); +// local: AlarmHandler +uint32_t SITransfer(uint32_t channel, const void *bufferOut, uint32_t lengthOut, void *bufferIn, uint32_t lengthIn, void *callback); +// local: GetTypeCallback +uint32_t SIGetType(uint32_t channel); +uint32_t SIGetTypeAsync(uint32_t channel, void *callback); +uint32_t SIDecodeType(uint32_t value); +uint32_t SIProbe(uint32_t channel); +void SISetSamplingRate(uint32_t rate); +void SIRefreshSamplingRate(); + +} + +} \ No newline at end of file diff --git a/ttyd-tools/rel/source/keyboard.cpp b/ttyd-tools/rel/source/keyboard.cpp new file mode 100644 index 00000000..fb5de22e --- /dev/null +++ b/ttyd-tools/rel/source/keyboard.cpp @@ -0,0 +1,127 @@ 
+#include "keyboard.h" + +#include + +namespace mod { + +Keyboard::Keyboard(int channel) +{ + setChannel(channel); +} + +void Keyboard::setChannel(int channel) +{ + // Disconnect if necessary + if (mConnected) + { + disconnect(); + } + + // Set new channel + mChannel = channel; + + // Reconnect + connect(); +} + +bool Keyboard::connect() +{ + if (ttyd::si::SIProbe(mChannel) != 0x08200000) + { + // No keyboard in that slot + return false; + } + + // Enable polling from device + ttyd::si::SIEnablePolling(1 << (31 - mChannel)); + + mConnected = true; + return true; +} + +void Keyboard::disconnect() +{ + // Flush response + uint64_t message; + ttyd::si::SIGetResponse(mChannel, &message); + + // Disable polling + ttyd::si::SIDisablePolling(1 << (31 - mChannel)); + mConnected = false; +} + +void Keyboard::update() +{ + if (!mConnected && !connect()) + { + return; + } + + // Poll next state + ttyd::si::SISetCommand(mChannel, 0x00540000); + ttyd::si::SITransferCommands(); + + // Read data + uint64_t message; + if (!ttyd::si::SIGetResponse(mChannel, &message) || message & (1LL << 63)) + { + // Failed to receive response or ERRSTAT is set + disconnect(); + } + + // Save last key state + mKeysPrevCount = mKeysDownCount; + memcpy(mKeysPrev, mKeysDown, sizeof(mKeysPrev)); + + // Read new keys + mKeysDownCount = 0; + for (int i = 0; i < cMaxKeysPressed; ++i) + { + KeyCode code = static_cast((message >> (i * 8 + 8)) & 0xFF); + + if (code == KeyCode::kInvalid) + { + continue; + } + + mKeysDown[mKeysDownCount++] = code; + } + + // Check released keys + mKeysReleasedCount = 0; + for (int i = 0; i < mKeysPrevCount; ++i) + { + bool released = true; + for (int j = 0; j < mKeysDownCount; ++j) + { + if (mKeysPrev[i] == mKeysDown[j]) + { + released = false; + } + } + if (released) + { + mKeysReleased[mKeysReleasedCount++] = mKeysPrev[i]; + } + } + + // Check pressed keys + mKeysPressedCount = 0; + for (int i = 0; i < mKeysDownCount; ++i) + { + bool pressed = true; + for (int j = 0; j < mKeysPrevCount; ++j) + { + if (mKeysDown[i] == mKeysPrev[j]) + { + pressed = false; + } + } + if (pressed) + { + mKeysPressed[mKeysPressedCount++] = mKeysDown[i]; + } + } +} + +} \ No newline at end of file diff --git a/ttyd-tools/rel/source/mod.cpp b/ttyd-tools/rel/source/mod.cpp index b5d0b103..4ba47e56 100644 --- a/ttyd-tools/rel/source/mod.cpp +++ b/ttyd-tools/rel/source/mod.cpp @@ -40,15 +40,15 @@ void Mod::init() ttyd::fontmgr::fontmgrTexSetup(); patch::hookFunction(ttyd::fontmgr::fontmgrTexSetup, [](){}); - // Skip the logo - patch::hookFunction(ttyd::seq_logo::seq_logoMain, [](ttyd::seqdrv::SeqInfo *) - { - ttyd::seqdrv::seqSetSeq(ttyd::seqdrv::SeqIndex::kTitle, nullptr, nullptr); - }); + // Keyboard on controller port 1 + mKeyboard = new Keyboard(1); } void Mod::updateEarly() { + // Keyboard code + mKeyboard->update(); + // Check for font load ttyd::dispdrv::dispEntry(ttyd::dispdrv::DisplayLayer::kDebug3d, 0, [](ttyd::dispdrv::DisplayLayer layerId, void *user) { @@ -83,17 +83,41 @@ void Mod::updateEarly() void Mod::draw() { + char keyDownString[4] = ""; + for (int i = 0; i < mKeyboard->getKeyDownCount(); ++i) + { + KeyCode k = mKeyboard->getKeyDown(i); + keyDownString[i] = Keyboard::getCharForKeycode(k); + } + + char keyPressedString[4] = ""; + for (int i = 0; i < mKeyboard->getKeyPressedCount(); ++i) + { + KeyCode k = mKeyboard->getKeyPressed(i); + keyPressedString[i] = Keyboard::getCharForKeycode(k); + } + + char keyReleasedString[4] = ""; + for (int i = 0; i < mKeyboard->getKeyReleasedCount(); ++i) + { + KeyCode k = 
mKeyboard->getKeyReleased(i); + keyReleasedString[i] = Keyboard::getCharForKeycode(k); + } + ttyd::mario::Player *player = ttyd::mario::marioGetPtr(); sprintf(mDisplayBuffer, - "Pos: %.2f %.2f %.2f\r\nSpdY: %.2f\r\nPST: %lu", + "Pos: %.2f %.2f %.2f\r\nSpdY: %.2f\r\nPST: %lu\r\nKBD: %s\r\nKBP: %s\r\nKBR: %s", player->playerPosition[0], player->playerPosition[1], player->playerPosition[2], player->wJumpVelocityY, - mPalaceSkipTimer.getValue()); + mPalaceSkipTimer.getValue(), + keyDownString, + keyPressedString, + keyReleasedString); ttyd::fontmgr::FontDrawStart(); uint32_t color = 0xFFFFFFFF; ttyd::fontmgr::FontDrawColor(reinterpret_cast(&color)); ttyd::fontmgr::FontDrawEdge(); - ttyd::fontmgr::FontDrawMessage(-272, -100, mDisplayBuffer); + ttyd::fontmgr::FontDrawMessage(-272, -40, mDisplayBuffer); } } \ No newline at end of file From d403ebe2d2134ac4e380a5288da049ccf16e843c Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Wed, 19 Sep 2018 21:34:13 +0200 Subject: [PATCH 05/90] rel: Update Makefile for new elf2rel syntax --- ttyd-tools/rel/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ttyd-tools/rel/Makefile b/ttyd-tools/rel/Makefile index d6c2c5db..8fa43fa6 100644 --- a/ttyd-tools/rel/Makefile +++ b/ttyd-tools/rel/Makefile @@ -168,7 +168,7 @@ $(OFILES_SOURCES) : $(HFILES) # REL linking %.rel: %.elf @echo output ... $(notdir $@) - @$(ELF2REL) $< $(MAPFILE) + @$(ELF2REL) $< -s $(MAPFILE) %.gci: %.rel @echo packing ... $(notdir $@) From 6503221cb651a1ae78369d99a8c7a4127d923731 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Wed, 19 Sep 2018 21:35:48 +0200 Subject: [PATCH 06/90] elf2rel: Fix necessary sections being removed when empty --- ttyd-tools/elf2rel/elf2rel.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ttyd-tools/elf2rel/elf2rel.cpp b/ttyd-tools/elf2rel/elf2rel.cpp index c4e950b0..a0a92307 100644 --- a/ttyd-tools/elf2rel/elf2rel.cpp +++ b/ttyd-tools/elf2rel/elf2rel.cpp @@ -228,8 +228,8 @@ int main(int argc, char **argv) std::vector sectionInfoBuffer; std::map writtenSections; int totalBssSize = 0; - int maxAlign = 1; - int maxBssAlign = 1; + int maxAlign = 2; + int maxBssAlign = 2; for (const auto §ion : inputElf.sections) { // Should keep? @@ -239,7 +239,7 @@ int main(int argc, char **argv) { return val == section->get_name() || section->get_name().find(val + ".") == 0; - }) != cRelSectionMask.end() && section->get_size() != 0) + }) != cRelSectionMask.end()) { // BSS? if (section->get_type() == SHT_NOBITS) @@ -254,12 +254,12 @@ int main(int argc, char **argv) } else { - // Update max alignment - int align = static_cast(section->get_addr_align()); + // Update max alignment (minimum 2, low offset bit is used for exec flag) + int align = std::max(static_cast(section->get_addr_align()), 2); maxAlign = std::max(maxAlign, align); // Write padding - int requiredPadding = align - outputBuffer.size() % align; + int requiredPadding = ((outputBuffer.size() + align - 1) & ~(align - 1)) - outputBuffer.size(); for (int i = 0; i < requiredPadding; ++i) { save(outputBuffer, 0); From fc1bc32dafb944751015fb06273d55ffafa9d8db Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Wed, 19 Sep 2018 21:45:03 +0200 Subject: [PATCH 07/90] Update README --- README.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 49d6969a..626eab36 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,7 @@ -This contains the tools for Paper Mario: The Thousand Year Door I have built. 
Most of these were built completely blind without any debug information. - * docs: Not a tool, this folder contains various pieces documentation +This contains various tools built for Paper Mario: The Thousand Year Door, some of which are applicable to other GameCube games as well. + * docs: Various pieces of documentation * ttydasm: Disassembler for the event command scripting language * rellink: REL file linking tool + * elf2rel: Convert from ELF file to REL file + * gcipack: Pack a file into a GCI file to be loaded off memory card + * rel: Framework for writing code on-top of existing GameCube games \ No newline at end of file From 22ee85dc02f16b0d4de9cb50871c1537483fe79c Mon Sep 17 00:00:00 2001 From: Brett Date: Mon, 24 Sep 2018 16:37:45 -0400 Subject: [PATCH 08/90] ttydview: Change filename input to be via command line (#4) * ttydview: Change filename input to be via command line * ttydview: Change filename output to be via command line --- ttyd-tools/ttydview/ttydview.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/ttyd-tools/ttydview/ttydview.py b/ttyd-tools/ttydview/ttydview.py index 1885908a..e3c918c5 100644 --- a/ttyd-tools/ttydview/ttydview.py +++ b/ttyd-tools/ttydview/ttydview.py @@ -1,19 +1,20 @@ """ File: ttydview.py -Author: SolidifiedGaming aka Brett B. -Revision: 1.0 +Author: Brett B. +Revision: 1.0.2 Purpose: Create .obj file from map collision data for viewing """ import os +import sys import math import numpy import struct inputPath = os.path.join(os.path.dirname(__file__), "map_data") + "/" -inputFile = inputPath + input("Enter the name of the input file: ") +inputFile = inputPath + sys.argv[1] outputPath = os.path.join(os.path.dirname(__file__), "obj_files") + "/" -outputFile = outputPath + input("Enter the name of the output file: ") + ".obj" +outputFile = outputPath + sys.argv[2] + ".obj" f = open(inputFile, "rb") binaryData = f.read() From c03a98e9c3178344c480a7b696da5e84d6fdccdd Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Fri, 12 Oct 2018 23:51:52 +0200 Subject: [PATCH 09/90] ttydasm: Fix float expressions for SPM --- ttyd-tools/ttydasm/ttydasm.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ttyd-tools/ttydasm/ttydasm.cpp b/ttyd-tools/ttydasm/ttydasm.cpp index 441c6886..74a891cc 100644 --- a/ttyd-tools/ttydasm/ttydasm.cpp +++ b/ttyd-tools/ttydasm/ttydasm.cpp @@ -33,8 +33,13 @@ std::map gSymbolMap; namespace ExpressionZones { const int cZoneExtent = 10000000; +#ifdef GAME_SPM +const int cAddrBase = -270000000; +const int cFloatBase = -240000000; +#else const int cAddrBase = -250000000; const int cFloatBase = -230000000; +#endif const int cUFBase = -210000000; const int cUWBase = -190000000; const int cGSWBase = -170000000; From 3f489c711a80e510f5e0534cb046024aa94ff459 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Wed, 17 Oct 2018 14:32:44 +0200 Subject: [PATCH 10/90] rel: Add string.c to symbol maps --- ttyd-tools/rel/include/ttyd.eu.lst | 10 ++++++++++ ttyd-tools/rel/include/ttyd.jp.lst | 9 +++++++++ ttyd-tools/rel/include/ttyd.us.lst | 10 ++++++++++ 3 files changed, 29 insertions(+) diff --git a/ttyd-tools/rel/include/ttyd.eu.lst b/ttyd-tools/rel/include/ttyd.eu.lst index dfea7089..a2d64057 100644 --- a/ttyd-tools/rel/include/ttyd.eu.lst +++ b/ttyd-tools/rel/include/ttyd.eu.lst @@ -258,6 +258,16 @@ // 8026f850:long2str // 8026faa8:parse_format +// string.c +80271180:strstr +802711e8:strchr +80271218:strncmp +80271258:strcmp +80271380:strcat +802713ac:strncpy +802713f0:strcpy +802714a8:strlen + // si.a 
802c194c:SIBusy 802c196c:SIIsChanBusy diff --git a/ttyd-tools/rel/include/ttyd.jp.lst b/ttyd-tools/rel/include/ttyd.jp.lst index e9a45df8..fe8c2cdf 100644 --- a/ttyd-tools/rel/include/ttyd.jp.lst +++ b/ttyd-tools/rel/include/ttyd.jp.lst @@ -256,6 +256,15 @@ // 80265aac:long2str // 80265d04:parse_format +// string.c +802673dc:strchr +8026740c:strncmp +8026744c:strcmp +80267574:strcat +802675a0:strncpy +802675e4:strcpy +8026769c:strlen + // si.a 802b7acc:SIBusy 802b7aec:SIIsChanBusy diff --git a/ttyd-tools/rel/include/ttyd.us.lst b/ttyd-tools/rel/include/ttyd.us.lst index 37e1b8ac..6ed4023a 100644 --- a/ttyd-tools/rel/include/ttyd.us.lst +++ b/ttyd-tools/rel/include/ttyd.us.lst @@ -258,6 +258,16 @@ // 8026ba5c:long2str // 8026bcb4:parse_format +// string.c +8026d38c:strstr +8026d3f4:strchr +8026d424:strncmp +8026d464:strcmp +8026d58c:strcat +8026d5b8:strncpy +8026d5fc:strcpy +8026d6b4:strlen + // si.a 802bd7b8:SIBusy 802bd7d8:SIIsChanBusy From 3a52aa4b15de6ad460f3cf52d31b00d42410eabf Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Wed, 17 Oct 2018 17:33:22 +0200 Subject: [PATCH 11/90] rel: Implement rudimentary console --- ttyd-tools/rel/include/keyboard.h | 16 +++++- ttyd-tools/rel/include/mod.h | 5 +- ttyd-tools/rel/source/mod.cpp | 82 +++++++++++++------------------ 3 files changed, 51 insertions(+), 52 deletions(-) diff --git a/ttyd-tools/rel/include/keyboard.h b/ttyd-tools/rel/include/keyboard.h index 3062623c..31255158 100644 --- a/ttyd-tools/rel/include/keyboard.h +++ b/ttyd-tools/rel/include/keyboard.h @@ -169,16 +169,28 @@ class Keyboard return false; } - static constexpr char getCharForKeycode(KeyCode code) + static constexpr char getCharForKeycode(KeyCode code, bool shift = false) { if (code >= KeyCode::kA && code <= KeyCode::kZ) { - return static_cast('A' + static_cast(code) - static_cast(KeyCode::kA)); + return static_cast((shift ? 'A' : 'a') + static_cast(code) - static_cast(KeyCode::kA)); } else if (code >= KeyCode::k0 && code <= KeyCode::k9) { return static_cast('0' + static_cast(code) - static_cast(KeyCode::k0)); } + else if (code == KeyCode::kMinus) + { + return shift ? '_' : '-'; + } + else if (code == KeyCode::kComma) + { + return shift ? 
';' : ','; + } + else if (code == KeyCode::kSpace) + { + return ' '; + } else { return '\0'; diff --git a/ttyd-tools/rel/include/mod.h b/ttyd-tools/rel/include/mod.h index b1f5ebf1..f708c313 100644 --- a/ttyd-tools/rel/include/mod.h +++ b/ttyd-tools/rel/include/mod.h @@ -16,12 +16,11 @@ class Mod private: void updateEarly(); void draw(); + void processCommand(const char *command); private: - Timer mPalaceSkipTimer; - bool mPaused = false; - void (*mPFN_makeKey_trampoline)() = nullptr; + char mCommandBuffer[256] = ""; char mDisplayBuffer[256]; Keyboard *mKeyboard = nullptr; diff --git a/ttyd-tools/rel/source/mod.cpp b/ttyd-tools/rel/source/mod.cpp index 4ba47e56..8e6cd88b 100644 --- a/ttyd-tools/rel/source/mod.cpp +++ b/ttyd-tools/rel/source/mod.cpp @@ -11,6 +11,7 @@ #include "patch.h" #include +#include namespace mod { @@ -49,70 +50,52 @@ void Mod::updateEarly() // Keyboard code mKeyboard->update(); - // Check for font load + // Register draw command ttyd::dispdrv::dispEntry(ttyd::dispdrv::DisplayLayer::kDebug3d, 0, [](ttyd::dispdrv::DisplayLayer layerId, void *user) { reinterpret_cast(user)->draw(); }, this); - // Palace skip timing code - if (ttyd::mariost::marioStGetSystemLevel() == 0xF) - { - // Reset upon pausing - mPalaceSkipTimer.stop(); - mPalaceSkipTimer.setValue(0); - mPaused = true; - } - else if (ttyd::mariost::marioStGetSystemLevel() == 0 && mPaused) - { - // Start when unpausing - mPalaceSkipTimer.start(); - mPaused = false; - } - - if (ttyd::system::keyGetButtonTrg(0) & 0x0400) + // Keyboard input for prompt + size_t bufferLen = strlen(mCommandBuffer); + for (int i = 0; i < mKeyboard->getKeyPressedCount(); ++i) { - // Stop when pressing A or X - mPalaceSkipTimer.stop(); + KeyCode pressed = mKeyboard->getKeyPressed(i); + char textInput = Keyboard::getCharForKeycode( + pressed, + mKeyboard->isKeyDown(KeyCode::kLeftShift) || mKeyboard->isKeyDown(KeyCode::kRightShift) + ); + if (textInput != '\0') + { + if (bufferLen < sizeof(mCommandBuffer) - 1) + { + mCommandBuffer[bufferLen++] = textInput; + mCommandBuffer[bufferLen] = '\0'; + } + } + else if (pressed == KeyCode::kEnter) + { + processCommand(mCommandBuffer); + mCommandBuffer[0] = '\0'; + } + else if (pressed == KeyCode::kBackspace && bufferLen > 0) + { + mCommandBuffer[--bufferLen] = '\0'; + } } - mPalaceSkipTimer.tick(); - + // Call original function mPFN_makeKey_trampoline(); } void Mod::draw() { - char keyDownString[4] = ""; - for (int i = 0; i < mKeyboard->getKeyDownCount(); ++i) - { - KeyCode k = mKeyboard->getKeyDown(i); - keyDownString[i] = Keyboard::getCharForKeycode(k); - } - - char keyPressedString[4] = ""; - for (int i = 0; i < mKeyboard->getKeyPressedCount(); ++i) - { - KeyCode k = mKeyboard->getKeyPressed(i); - keyPressedString[i] = Keyboard::getCharForKeycode(k); - } - - char keyReleasedString[4] = ""; - for (int i = 0; i < mKeyboard->getKeyReleasedCount(); ++i) - { - KeyCode k = mKeyboard->getKeyReleased(i); - keyReleasedString[i] = Keyboard::getCharForKeycode(k); - } - ttyd::mario::Player *player = ttyd::mario::marioGetPtr(); sprintf(mDisplayBuffer, - "Pos: %.2f %.2f %.2f\r\nSpdY: %.2f\r\nPST: %lu\r\nKBD: %s\r\nKBP: %s\r\nKBR: %s", + "Pos: %.2f %.2f %.2f\r\nSpdY: %.2f\r\nCmd: %s", player->playerPosition[0], player->playerPosition[1], player->playerPosition[2], player->wJumpVelocityY, - mPalaceSkipTimer.getValue(), - keyDownString, - keyPressedString, - keyReleasedString); + mCommandBuffer); ttyd::fontmgr::FontDrawStart(); uint32_t color = 0xFFFFFFFF; ttyd::fontmgr::FontDrawColor(reinterpret_cast(&color)); @@ 
-120,4 +103,9 @@ void Mod::draw() ttyd::fontmgr::FontDrawMessage(-272, -40, mDisplayBuffer); } +void Mod::processCommand(const char *command) +{ + +} + } \ No newline at end of file From d8ed53e825b144a2c4c0e54f9fd9ed2b8164825e Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Wed, 17 Oct 2018 17:46:32 +0200 Subject: [PATCH 12/90] rel: Seperate GameCube SDK includes from game-specific includes --- ttyd-tools/rel/include/{ttyd => gc}/si.h | 2 +- ttyd-tools/rel/include/keyboard.h | 2 -- ttyd-tools/rel/source/keyboard.cpp | 16 +++++++++------- 3 files changed, 10 insertions(+), 10 deletions(-) rename ttyd-tools/rel/include/{ttyd => gc}/si.h (98%) diff --git a/ttyd-tools/rel/include/ttyd/si.h b/ttyd-tools/rel/include/gc/si.h similarity index 98% rename from ttyd-tools/rel/include/ttyd/si.h rename to ttyd-tools/rel/include/gc/si.h index 71b97f91..4ecfc11f 100644 --- a/ttyd-tools/rel/include/ttyd/si.h +++ b/ttyd-tools/rel/include/gc/si.h @@ -2,7 +2,7 @@ #include -namespace ttyd::si { +namespace gc::si { extern "C" { diff --git a/ttyd-tools/rel/include/keyboard.h b/ttyd-tools/rel/include/keyboard.h index 31255158..d25ddd3c 100644 --- a/ttyd-tools/rel/include/keyboard.h +++ b/ttyd-tools/rel/include/keyboard.h @@ -1,7 +1,5 @@ #pragma once -#include - #include #include diff --git a/ttyd-tools/rel/source/keyboard.cpp b/ttyd-tools/rel/source/keyboard.cpp index fb5de22e..669eda36 100644 --- a/ttyd-tools/rel/source/keyboard.cpp +++ b/ttyd-tools/rel/source/keyboard.cpp @@ -1,5 +1,7 @@ #include "keyboard.h" +#include + #include namespace mod { @@ -26,14 +28,14 @@ void Keyboard::setChannel(int channel) bool Keyboard::connect() { - if (ttyd::si::SIProbe(mChannel) != 0x08200000) + if (gc::si::SIProbe(mChannel) != 0x08200000) { // No keyboard in that slot return false; } // Enable polling from device - ttyd::si::SIEnablePolling(1 << (31 - mChannel)); + gc::si::SIEnablePolling(1 << (31 - mChannel)); mConnected = true; return true; @@ -43,10 +45,10 @@ void Keyboard::disconnect() { // Flush response uint64_t message; - ttyd::si::SIGetResponse(mChannel, &message); + gc::si::SIGetResponse(mChannel, &message); // Disable polling - ttyd::si::SIDisablePolling(1 << (31 - mChannel)); + gc::si::SIDisablePolling(1 << (31 - mChannel)); mConnected = false; } @@ -58,12 +60,12 @@ void Keyboard::update() } // Poll next state - ttyd::si::SISetCommand(mChannel, 0x00540000); - ttyd::si::SITransferCommands(); + gc::si::SISetCommand(mChannel, 0x00540000); + gc::si::SITransferCommands(); // Read data uint64_t message; - if (!ttyd::si::SIGetResponse(mChannel, &message) || message & (1LL << 63)) + if (!gc::si::SIGetResponse(mChannel, &message) || message & (1LL << 63)) { // Failed to receive response or ERRSTAT is set disconnect(); From 885cb8e675f60d974e47decae4713dba2e2852c2 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Wed, 17 Oct 2018 21:56:06 +0200 Subject: [PATCH 13/90] rel: Fix keyboard text generation --- ttyd-tools/rel/include/keyboard.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/ttyd-tools/rel/include/keyboard.h b/ttyd-tools/rel/include/keyboard.h index d25ddd3c..550df684 100644 --- a/ttyd-tools/rel/include/keyboard.h +++ b/ttyd-tools/rel/include/keyboard.h @@ -171,11 +171,15 @@ class Keyboard { if (code >= KeyCode::kA && code <= KeyCode::kZ) { - return static_cast((shift ? 'A' : 'a') + static_cast(code) - static_cast(KeyCode::kA)); + return static_cast(static_cast(code) - static_cast(KeyCode::kA) + (shift ? 
'A' : 'a')); } - else if (code >= KeyCode::k0 && code <= KeyCode::k9) + else if (code >= KeyCode::k1 && code <= KeyCode::k9) { - return static_cast('0' + static_cast(code) - static_cast(KeyCode::k0)); + return static_cast(static_cast(code) - static_cast(KeyCode::k1) + '1'); + } + else if (code == KeyCode::k0) + { + return '0'; } else if (code == KeyCode::kMinus) { From 49f63563ef1f1d6a3ed027ff6ead5551c624926f Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Wed, 17 Oct 2018 21:56:23 +0200 Subject: [PATCH 14/90] rel: Add heap debugging --- ttyd-tools/rel/include/mod.h | 6 +- ttyd-tools/rel/include/ttyd.eu.lst | 20 ++++++ ttyd-tools/rel/include/ttyd.jp.lst | 20 ++++++ ttyd-tools/rel/include/ttyd.us.lst | 20 ++++++ ttyd-tools/rel/source/mod.cpp | 102 +++++++++++++++++++++++++++-- 5 files changed, 162 insertions(+), 6 deletions(-) diff --git a/ttyd-tools/rel/include/mod.h b/ttyd-tools/rel/include/mod.h index f708c313..ad875a25 100644 --- a/ttyd-tools/rel/include/mod.h +++ b/ttyd-tools/rel/include/mod.h @@ -18,10 +18,14 @@ class Mod void draw(); void processCommand(const char *command); + void updateHeapInfo(); + private: void (*mPFN_makeKey_trampoline)() = nullptr; char mCommandBuffer[256] = ""; - char mDisplayBuffer[256]; + + int mDebugHeapId = -1; + char mDebugHeapText[64]; Keyboard *mKeyboard = nullptr; }; diff --git a/ttyd-tools/rel/include/ttyd.eu.lst b/ttyd-tools/rel/include/ttyd.eu.lst index a2d64057..eebad3ba 100644 --- a/ttyd-tools/rel/include/ttyd.eu.lst +++ b/ttyd-tools/rel/include/ttyd.eu.lst @@ -258,6 +258,12 @@ // 8026f850:long2str // 8026faa8:parse_format +// scanf.c +80270138:sscanf +// 802701fc:__StringRead +// 8027028c:__sformatter +// 80270c38:parse_format + // string.c 80271180:strstr 802711e8:strchr @@ -268,6 +274,20 @@ 802713f0:strcpy 802714a8:strlen +// os.a +// OSAlloc.c +// text +// 8029a238:DLInsert +8029a2e4:OSAllocFromHeap +8029a3e0:OSFreeToHeap +8029a45c:OSSetCurrentHeap +8029a46c:OSInitAlloc +8029a4dc:OSCreateHeap +8029a548:OSDestroyHeap +// data +8041f070:OSAlloc_HeapArray +8041f074:OSAlloc_NumHeaps + // si.a 802c194c:SIBusy 802c196c:SIIsChanBusy diff --git a/ttyd-tools/rel/include/ttyd.jp.lst b/ttyd-tools/rel/include/ttyd.jp.lst index fe8c2cdf..ce7881be 100644 --- a/ttyd-tools/rel/include/ttyd.jp.lst +++ b/ttyd-tools/rel/include/ttyd.jp.lst @@ -256,6 +256,12 @@ // 80265aac:long2str // 80265d04:parse_format +// scanf.c +80266394:sscanf +// 80266458:__StringRead +// 802664e8:__sformatter +// 80266e94:parse_format + // string.c 802673dc:strchr 8026740c:strncmp @@ -265,6 +271,20 @@ 802675e4:strcpy 8026769c:strlen +// os.a +// OSAlloc.c +// text +// 802903e8:DLInsert +80290494:OSAllocFromHeap +80290590:OSFreeToHeap +8029060c:OSSetCurrentHeap +8029061c:OSInitAlloc +8029068c:OSCreateHeap +802906f8:OSDestroyHeap +// data +80418ea0:OSAlloc_HeapArray +80418ea4:OSAlloc_NumHeaps + // si.a 802b7acc:SIBusy 802b7aec:SIIsChanBusy diff --git a/ttyd-tools/rel/include/ttyd.us.lst b/ttyd-tools/rel/include/ttyd.us.lst index 6ed4023a..3a950777 100644 --- a/ttyd-tools/rel/include/ttyd.us.lst +++ b/ttyd-tools/rel/include/ttyd.us.lst @@ -258,6 +258,12 @@ // 8026ba5c:long2str // 8026bcb4:parse_format +// scanf.c +8026c344:sscanf +// 8026c408:__StringRead +// 8026c498:__sformatter +// 8026ce44:parse_format + // string.c 8026d38c:strstr 8026d3f4:strchr @@ -268,6 +274,20 @@ 8026d5fc:strcpy 8026d6b4:strlen +// os.a +// OSAlloc.c +// text +// 80296400:DLInsert +802964ac:OSAllocFromHeap +802965a8:OSFreeToHeap +80296624:OSSetCurrentHeap +80296634:OSInitAlloc +802966a4:OSCreateHeap 
+80296710:OSDestroyHeap +// data +8041f070:OSAlloc_HeapArray +8041f074:OSAlloc_NumHeaps + // si.a 802bd7b8:SIBusy 802bd7d8:SIIsChanBusy diff --git a/ttyd-tools/rel/source/mod.cpp b/ttyd-tools/rel/source/mod.cpp index 8e6cd88b..24bd9df0 100644 --- a/ttyd-tools/rel/source/mod.cpp +++ b/ttyd-tools/rel/source/mod.cpp @@ -1,5 +1,7 @@ #include "mod.h" +#include "patch.h" + #include #include #include @@ -8,7 +10,7 @@ #include #include -#include "patch.h" +#include #include #include @@ -84,6 +86,8 @@ void Mod::updateEarly() } } + updateHeapInfo(); + // Call original function mPFN_makeKey_trampoline(); } @@ -91,21 +95,109 @@ void Mod::updateEarly() void Mod::draw() { ttyd::mario::Player *player = ttyd::mario::marioGetPtr(); - sprintf(mDisplayBuffer, - "Pos: %.2f %.2f %.2f\r\nSpdY: %.2f\r\nCmd: %s", + char displayBuffer[256]; + sprintf(displayBuffer, + "Pos: %.2f %.2f %.2f\r\n" + "SpdY: %.2f\r\n" + "Cmd: %s\r\n" + "%s", player->playerPosition[0], player->playerPosition[1], player->playerPosition[2], player->wJumpVelocityY, - mCommandBuffer); + mCommandBuffer, + mDebugHeapText); ttyd::fontmgr::FontDrawStart(); uint32_t color = 0xFFFFFFFF; ttyd::fontmgr::FontDrawColor(reinterpret_cast(&color)); ttyd::fontmgr::FontDrawEdge(); - ttyd::fontmgr::FontDrawMessage(-272, -40, mDisplayBuffer); + ttyd::fontmgr::FontDrawMessage(-272, -40, displayBuffer); } void Mod::processCommand(const char *command) { + size_t functionNameLength = strchr(command, ' ') - command; + if (!strncmp(command, "debug_heap", functionNameLength)) + { + // Read heap ID + int targetHeap; + sscanf(command, "debug_heap %d", &targetHeap); + mDebugHeapId = targetHeap; + } +} + +void Mod::updateHeapInfo() +{ + if (mDebugHeapId == -1) + { + mDebugHeapText[0] = '\0'; + return; + } + + if (mDebugHeapId >= gc::os::OSAlloc_NumHeaps || mDebugHeapId < 0) + { + sprintf(mDebugHeapText, "Heap: %d is not a valid heap\r\n", mDebugHeapId); + return; + } + + const gc::os::HeapInfo &heap = gc::os::OSAlloc_HeapArray[mDebugHeapId]; + + // Check heap integrity + bool valid = true; + gc::os::ChunkInfo *currentChunk = nullptr; + gc::os::ChunkInfo *prevChunk = nullptr; + for (currentChunk = heap.firstUsed; currentChunk; currentChunk = currentChunk->next) + { + // Check pointer sanity + auto pointerIsValid = [](void *ptr) + { + uint32_t pointerRaw = reinterpret_cast(ptr); + return pointerRaw >= 0x80000000 && pointerRaw <= 0x817fffff; + }; + if (!pointerIsValid(currentChunk)) + { + valid = false; + break; + } + + // Sanity check size + if (currentChunk->size > 0x17fffff) + { + valid = false; + break; + } + + // Check linked list integrity + if (prevChunk != currentChunk->prev) + { + valid = false; + break; + } + + prevChunk = currentChunk; + } + if (!valid) + { + sprintf(mDebugHeapText, + "Heap: %d corrupt at %08lx\r\n", + mDebugHeapId, + reinterpret_cast(currentChunk)); + return; + } + + // Accumulate used memory + int usage = 0; + int chunks = 0; + for (gc::os::ChunkInfo *chunk = heap.firstUsed; chunk; chunk = chunk->next) + { + usage += chunk->size; + ++chunks; + } + sprintf(mDebugHeapText, + "Heap: id %d, %.2f/%.2fkb, %d cks\r\n", + mDebugHeapId, + usage / 1024.f, + heap.capacity / 1024.f, + chunks); } } \ No newline at end of file From ce76c79389dcf804a26564a7982fd426f5925808 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Thu, 18 Oct 2018 14:35:09 +0200 Subject: [PATCH 15/90] rel: Fix OSAlloc.c data section addresses on EU --- ttyd-tools/rel/include/ttyd.eu.lst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/ttyd-tools/rel/include/ttyd.eu.lst b/ttyd-tools/rel/include/ttyd.eu.lst index eebad3ba..65e3c3de 100644 --- a/ttyd-tools/rel/include/ttyd.eu.lst +++ b/ttyd-tools/rel/include/ttyd.eu.lst @@ -285,8 +285,8 @@ 8029a4dc:OSCreateHeap 8029a548:OSDestroyHeap // data -8041f070:OSAlloc_HeapArray -8041f074:OSAlloc_NumHeaps +8042b9e0:OSAlloc_HeapArray +8042b9e4:OSAlloc_NumHeaps // si.a 802c194c:SIBusy From 7747a0c36dafb1d30f8be43bdc821fb4c25aa519 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Thu, 18 Oct 2018 14:37:10 +0200 Subject: [PATCH 16/90] rel: Add missing file /include/gc/os.h --- ttyd-tools/rel/include/gc/os.h | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 ttyd-tools/rel/include/gc/os.h diff --git a/ttyd-tools/rel/include/gc/os.h b/ttyd-tools/rel/include/gc/os.h new file mode 100644 index 00000000..96466d7f --- /dev/null +++ b/ttyd-tools/rel/include/gc/os.h @@ -0,0 +1,28 @@ +#pragma once + +#include + +namespace gc::os { + +struct ChunkInfo +{ + ChunkInfo *prev; + ChunkInfo *next; + uint32_t size; +} __attribute__((__packed__)); + +struct HeapInfo +{ + uint32_t capacity; + ChunkInfo *firstFree; + ChunkInfo *firstUsed; +} __attribute__((__packed__)); + +extern "C" { + +extern HeapInfo *OSAlloc_HeapArray; +extern int OSAlloc_NumHeaps; + +} + +} \ No newline at end of file From 7f80f090fadea00196aa9f1119cf702aa284cde0 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Wed, 23 Jan 2019 18:41:31 +0100 Subject: [PATCH 17/90] gcipack: Properly encode modified time as current time in UTC --- ttyd-tools/gcipack/gcipack.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ttyd-tools/gcipack/gcipack.py b/ttyd-tools/gcipack/gcipack.py index 32bd4628..6b22b19d 100644 --- a/ttyd-tools/gcipack/gcipack.py +++ b/ttyd-tools/gcipack/gcipack.py @@ -3,6 +3,7 @@ import struct import ctypes import math +from datetime import datetime inputFilename = sys.argv[1] inputFile = open(inputFilename, "rb") @@ -44,7 +45,7 @@ struct.pack_into(">B", headerBuffer, 0x06, 0xFF) # unused struct.pack_into(">B", headerBuffer, 0x07, 2) # banner flags (RGB5A3) struct.pack_into("32s", headerBuffer, 0x08, sys.argv[2].encode()) # filename -struct.pack_into(">L", headerBuffer, 0x28, 0) # modified time +struct.pack_into(">L", headerBuffer, 0x28, int((datetime.utcnow() - datetime(2000, 1, 1)).total_seconds())) # modified time struct.pack_into(">L", headerBuffer, 0x2C, 0) # image offset struct.pack_into(">H", headerBuffer, 0x30, 2) # icon format struct.pack_into(">H", headerBuffer, 0x32, 3) # animation speed (1 icon for 12 frames) From 995372cf97de75c868b6436194188e93de86ccf5 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Sat, 27 Apr 2019 00:31:08 +0200 Subject: [PATCH 18/90] rel: Revise seqSetSeq parameters --- ttyd-tools/rel/include/ttyd/seqdrv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ttyd-tools/rel/include/ttyd/seqdrv.h b/ttyd-tools/rel/include/ttyd/seqdrv.h index 44e1a05b..88d0faa3 100644 --- a/ttyd-tools/rel/include/ttyd/seqdrv.h +++ b/ttyd-tools/rel/include/ttyd/seqdrv.h @@ -32,7 +32,7 @@ extern "C" { void seqInit_MARIOSTORY(); void seqMain(); -void seqSetSeq(SeqIndex seq, const char *mapName, const char *beroName); +void seqSetSeq(SeqIndex seq, void *parameter0, void *parameter1); uint32_t seqGetSeq(); uint32_t seqGetPrevSeq(); uint32_t seqGetNextSeq(); From 3ca538c0ad32048dc9729e6d36b8ae4bf9b8a965 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Sat, 27 Apr 2019 00:31:59 +0200 Subject: [PATCH 19/90] rel: Add change_map command --- 
ttyd-tools/rel/source/mod.cpp | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/ttyd-tools/rel/source/mod.cpp b/ttyd-tools/rel/source/mod.cpp index 24bd9df0..53f99be1 100644 --- a/ttyd-tools/rel/source/mod.cpp +++ b/ttyd-tools/rel/source/mod.cpp @@ -122,6 +122,20 @@ void Mod::processCommand(const char *command) sscanf(command, "debug_heap %d", &targetHeap); mDebugHeapId = targetHeap; } + else if (!strncmp(command, "change_map", functionNameLength)) + { + static char mapName[32]; + static char beroName[32]; + int readArgumentCount = sscanf(command, "change_map %31s %31s", mapName, beroName); + if (readArgumentCount > 0) + { + if (readArgumentCount != 2) + { + beroName[0] = '\0'; + } + ttyd::seqdrv::seqSetSeq(ttyd::seqdrv::SeqIndex::kMapChange, mapName, beroName); + } + } } void Mod::updateHeapInfo() From df90268da8d4a79535a998e7668f56cece2d39b6 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Mon, 6 May 2019 17:49:04 +0200 Subject: [PATCH 20/90] rel: Add Backspace repeat --- ttyd-tools/rel/include/mod.h | 2 ++ ttyd-tools/rel/source/mod.cpp | 16 ++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/ttyd-tools/rel/include/mod.h b/ttyd-tools/rel/include/mod.h index ad875a25..afa3c09f 100644 --- a/ttyd-tools/rel/include/mod.h +++ b/ttyd-tools/rel/include/mod.h @@ -22,7 +22,9 @@ class Mod private: void (*mPFN_makeKey_trampoline)() = nullptr; + char mCommandBuffer[256] = ""; + int mBackspaceHoldTimer = 0; int mDebugHeapId = -1; char mDebugHeapText[64]; diff --git a/ttyd-tools/rel/source/mod.cpp b/ttyd-tools/rel/source/mod.cpp index 53f99be1..26d6a11b 100644 --- a/ttyd-tools/rel/source/mod.cpp +++ b/ttyd-tools/rel/source/mod.cpp @@ -83,9 +83,25 @@ void Mod::updateEarly() else if (pressed == KeyCode::kBackspace && bufferLen > 0) { mCommandBuffer[--bufferLen] = '\0'; + mBackspaceHoldTimer = 0; } } + // Backspace repeat handling + if (mKeyboard->isKeyReleased(KeyCode::kBackspace)) + { + mBackspaceHoldTimer = 0; + } + if (mKeyboard->isKeyDown(KeyCode::kBackspace)) + { + ++mBackspaceHoldTimer; + } + if (bufferLen > 0 && (mBackspaceHoldTimer >= 60)) + { + // Erase one per frame + mCommandBuffer[--bufferLen] = '\0'; + } + updateHeapInfo(); // Call original function From f853dfd9746fe419b5263658871f2936d0eba7ba Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Mon, 6 May 2019 17:52:05 +0200 Subject: [PATCH 21/90] rel: Fix debug_heap command argument parsing --- ttyd-tools/rel/source/mod.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ttyd-tools/rel/source/mod.cpp b/ttyd-tools/rel/source/mod.cpp index 26d6a11b..135d9bc7 100644 --- a/ttyd-tools/rel/source/mod.cpp +++ b/ttyd-tools/rel/source/mod.cpp @@ -134,7 +134,7 @@ void Mod::processCommand(const char *command) if (!strncmp(command, "debug_heap", functionNameLength)) { // Read heap ID - int targetHeap; + int targetHeap = -1; sscanf(command, "debug_heap %d", &targetHeap); mDebugHeapId = targetHeap; } From 0046137828027f2c256c63542fa92cf691d7c00d Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Mon, 6 May 2019 17:52:53 +0200 Subject: [PATCH 22/90] rel: Add show_ui command --- ttyd-tools/rel/include/mod.h | 2 ++ ttyd-tools/rel/source/mod.cpp | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/ttyd-tools/rel/include/mod.h b/ttyd-tools/rel/include/mod.h index afa3c09f..85850ade 100644 --- a/ttyd-tools/rel/include/mod.h +++ b/ttyd-tools/rel/include/mod.h @@ -26,6 +26,8 @@ class Mod char mCommandBuffer[256] = ""; int mBackspaceHoldTimer = 0; + bool mShowUi = true; + int mDebugHeapId = -1; char 
mDebugHeapText[64]; diff --git a/ttyd-tools/rel/source/mod.cpp b/ttyd-tools/rel/source/mod.cpp index 135d9bc7..bcf9480f 100644 --- a/ttyd-tools/rel/source/mod.cpp +++ b/ttyd-tools/rel/source/mod.cpp @@ -110,6 +110,9 @@ void Mod::updateEarly() void Mod::draw() { + if (!mShowUi) + return; + ttyd::mario::Player *player = ttyd::mario::marioGetPtr(); char displayBuffer[256]; sprintf(displayBuffer, @@ -152,6 +155,12 @@ void Mod::processCommand(const char *command) ttyd::seqdrv::seqSetSeq(ttyd::seqdrv::SeqIndex::kMapChange, mapName, beroName); } } + else if (!strncmp(command, "show_ui", functionNameLength)) + { + int shouldShowUi = 1; + sscanf(command, "show_ui %d", &shouldShowUi); + mShowUi = shouldShowUi; + } } void Mod::updateHeapInfo() From bd8e16c906401aa853ed100adceb30a0fc387c6d Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Thu, 6 Jun 2019 20:19:36 +0200 Subject: [PATCH 23/90] elf2rel: Fix the addend being ignored on relocations against external symbols --- ttyd-tools/elf2rel/elf2rel.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ttyd-tools/elf2rel/elf2rel.cpp b/ttyd-tools/elf2rel/elf2rel.cpp index a0a92307..88858807 100644 --- a/ttyd-tools/elf2rel/elf2rel.cpp +++ b/ttyd-tools/elf2rel/elf2rel.cpp @@ -371,7 +371,7 @@ int main(int argc, char **argv) rel.moduleID = 0; rel.targetSection = 0; // #todo-elf2rel: Check if this is important - rel.addend = it->second; + rel.addend = static_cast(addend + it->second); } } From b9b3bebb1f220946fbf598774cbdc41c72fa44c8 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Tue, 13 Aug 2019 00:13:54 +0200 Subject: [PATCH 24/90] rel: Add header and symbols for database.o --- ttyd-tools/rel/include/ttyd.eu.lst | 5 +++++ ttyd-tools/rel/include/ttyd.jp.lst | 5 +++++ ttyd-tools/rel/include/ttyd.us.lst | 5 +++++ ttyd-tools/rel/include/ttyd/database.h | 21 +++++++++++++++++++++ 4 files changed, 36 insertions(+) create mode 100644 ttyd-tools/rel/include/ttyd/database.h diff --git a/ttyd-tools/rel/include/ttyd.eu.lst b/ttyd-tools/rel/include/ttyd.eu.lst index 65e3c3de..b833bf2b 100644 --- a/ttyd-tools/rel/include/ttyd.eu.lst +++ b/ttyd-tools/rel/include/ttyd.eu.lst @@ -229,6 +229,11 @@ 80078e98:fontmgrTexSetup 80078ec4:fontmgrInit +// database.o +80086278:setupDataBase +800868e0:setupDataCheck +8008699c:setupDataLoad + // seq_logo.o // 80086d4c:progDisp // unused:arcLoad diff --git a/ttyd-tools/rel/include/ttyd.jp.lst b/ttyd-tools/rel/include/ttyd.jp.lst index ce7881be..99dbc636 100644 --- a/ttyd-tools/rel/include/ttyd.jp.lst +++ b/ttyd-tools/rel/include/ttyd.jp.lst @@ -227,6 +227,11 @@ 80076e98:fontmgrTexSetup 8007711c:fontmgrInit +// database.o +80083f9c:setupDataBase +80084604:setupDataCheck +800846c0:setupDataLoad + // seq_logo.o // 80084a6c:progDisp // unused:arcLoad diff --git a/ttyd-tools/rel/include/ttyd.us.lst b/ttyd-tools/rel/include/ttyd.us.lst index 3a950777..d4a6d6b0 100644 --- a/ttyd-tools/rel/include/ttyd.us.lst +++ b/ttyd-tools/rel/include/ttyd.us.lst @@ -229,6 +229,11 @@ 80077bdc:fontmgrTexSetup 80077c08:fontmgrInit +// database.o +80084f24:setupDataBase +8008558c:setupDataCheck +80085648:setupDataLoad + // seq_logo.o // 800859f8:progDisp // unused:arcLoad diff --git a/ttyd-tools/rel/include/ttyd/database.h b/ttyd-tools/rel/include/ttyd/database.h new file mode 100644 index 00000000..ad3d0c52 --- /dev/null +++ b/ttyd-tools/rel/include/ttyd/database.h @@ -0,0 +1,21 @@ +#pragma once + +#include + +namespace ttyd::database { + +struct DatabaseDefinition +{ + const char *name; + int32_t id; +} __attribute__((__packed__)); + +extern 
"C" { + +void setupDataLoad(const char *mapName); +int32_t setupDataCheck(); +void setupDataBase(const char *areaName, const char *mapName); + +} + +} \ No newline at end of file From 0bd29069922bd9fc7fddc7d1023f440a496c7af6 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Fri, 11 Oct 2019 16:36:29 +0200 Subject: [PATCH 25/90] rel: Refactor console for better look and more flexibility --- ttyd-tools/rel/include/mod.h | 16 +++- ttyd-tools/rel/source/mod.cpp | 169 ++++++++++++++++++++++++---------- 2 files changed, 134 insertions(+), 51 deletions(-) diff --git a/ttyd-tools/rel/include/mod.h b/ttyd-tools/rel/include/mod.h index 85850ade..faa1626e 100644 --- a/ttyd-tools/rel/include/mod.h +++ b/ttyd-tools/rel/include/mod.h @@ -16,17 +16,27 @@ class Mod private: void updateEarly(); void draw(); + + void updateConsole(); void processCommand(const char *command); - void updateHeapInfo(); + + void drawConsole(); + void drawMovementInfo(); + void drawHeapInfo(); private: void (*mPFN_makeKey_trampoline)() = nullptr; - char mCommandBuffer[256] = ""; + bool mShowUi = true; + char mDisplayBuffer[256]; + + bool mShowMovementInfo = false; + + char mCommandBuffer[64] = ""; int mBackspaceHoldTimer = 0; - bool mShowUi = true; + bool mConsoleActive = false; int mDebugHeapId = -1; char mDebugHeapText[64]; diff --git a/ttyd-tools/rel/source/mod.cpp b/ttyd-tools/rel/source/mod.cpp index bcf9480f..86ac5383 100644 --- a/ttyd-tools/rel/source/mod.cpp +++ b/ttyd-tools/rel/source/mod.cpp @@ -53,11 +53,57 @@ void Mod::updateEarly() mKeyboard->update(); // Register draw command - ttyd::dispdrv::dispEntry(ttyd::dispdrv::DisplayLayer::kDebug3d, 0, [](ttyd::dispdrv::DisplayLayer layerId, void *user) + ttyd::dispdrv::dispEntry(ttyd::dispdrv::DisplayLayer::kDebug3d, 1, [](ttyd::dispdrv::DisplayLayer layerId, void *user) { reinterpret_cast(user)->draw(); }, this); + updateConsole(); + updateHeapInfo(); + + // Call original function + mPFN_makeKey_trampoline(); +} + +void Mod::draw() +{ + if (!mShowUi) + return; + + mDisplayBuffer[0] = '\0'; + + drawHeapInfo(); + drawMovementInfo(); + drawConsole(); + + int lineCount = 1; + for (char *p = strchr(mDisplayBuffer, '\n'); p && p[1] != '\0'; p = strchr(p + 1, '\n')) + { + ++lineCount; + } + + ttyd::fontmgr::FontDrawStart(); + uint32_t color = 0x328BFFFF; + ttyd::fontmgr::FontDrawColor(reinterpret_cast(&color)); + ttyd::fontmgr::FontDrawEdge(); + ttyd::fontmgr::FontDrawMessage(-271, -212 + 28 * lineCount, mDisplayBuffer); +} + +void Mod::updateConsole() +{ + // Console shortcut L+Z or Grave + if ((ttyd::system::keyGetButton(0) == 0x0050 + && ttyd::system::keyGetButtonTrg(0) & 0x0050) + || mKeyboard->isKeyPressed(KeyCode::kPlus)) + { + mConsoleActive = !mConsoleActive; + } + + if (!mConsoleActive) + { + return; + } + // Keyboard input for prompt size_t bufferLen = strlen(mCommandBuffer); for (int i = 0; i < mKeyboard->getKeyPressedCount(); ++i) @@ -96,45 +142,33 @@ void Mod::updateEarly() { ++mBackspaceHoldTimer; } - if (bufferLen > 0 && (mBackspaceHoldTimer >= 60)) + if (bufferLen > 0 && (mBackspaceHoldTimer >= 40)) { // Erase one per frame mCommandBuffer[--bufferLen] = '\0'; } - - updateHeapInfo(); - - // Call original function - mPFN_makeKey_trampoline(); -} - -void Mod::draw() -{ - if (!mShowUi) - return; - - ttyd::mario::Player *player = ttyd::mario::marioGetPtr(); - char displayBuffer[256]; - sprintf(displayBuffer, - "Pos: %.2f %.2f %.2f\r\n" - "SpdY: %.2f\r\n" - "Cmd: %s\r\n" - "%s", - player->playerPosition[0], player->playerPosition[1], player->playerPosition[2], - 
player->wJumpVelocityY, - mCommandBuffer, - mDebugHeapText); - ttyd::fontmgr::FontDrawStart(); - uint32_t color = 0xFFFFFFFF; - ttyd::fontmgr::FontDrawColor(reinterpret_cast(&color)); - ttyd::fontmgr::FontDrawEdge(); - ttyd::fontmgr::FontDrawMessage(-272, -40, displayBuffer); } void Mod::processCommand(const char *command) { size_t functionNameLength = strchr(command, ' ') - command; - if (!strncmp(command, "debug_heap", functionNameLength)) + if (!strncmp(command, "exit", functionNameLength)) + { + mConsoleActive = false; + } + else if (!strncmp(command, "show_ui", functionNameLength)) + { + int shouldShowUi = 1; + sscanf(command, "show_ui %d", &shouldShowUi); + mShowUi = shouldShowUi; + } + else if (!strncmp(command, "show_pos", functionNameLength)) + { + int shouldShowPos = 1; + sscanf(command, "show_pos %d", &shouldShowPos); + mShowMovementInfo = shouldShowPos; + } + else if (!strncmp(command, "debug_heap", functionNameLength)) { // Read heap ID int targetHeap = -1; @@ -155,12 +189,6 @@ void Mod::processCommand(const char *command) ttyd::seqdrv::seqSetSeq(ttyd::seqdrv::SeqIndex::kMapChange, mapName, beroName); } } - else if (!strncmp(command, "show_ui", functionNameLength)) - { - int shouldShowUi = 1; - sscanf(command, "show_ui %d", &shouldShowUi); - mShowUi = shouldShowUi; - } } void Mod::updateHeapInfo() @@ -173,7 +201,7 @@ void Mod::updateHeapInfo() if (mDebugHeapId >= gc::os::OSAlloc_NumHeaps || mDebugHeapId < 0) { - sprintf(mDebugHeapText, "Heap: %d is not a valid heap\r\n", mDebugHeapId); + sprintf(mDebugHeapText, "Heap: %d is not a valid heap\n", mDebugHeapId); return; } @@ -215,10 +243,12 @@ void Mod::updateHeapInfo() } if (!valid) { - sprintf(mDebugHeapText, - "Heap: %d corrupt at %08lx\r\n", - mDebugHeapId, - reinterpret_cast(currentChunk)); + sprintf( + mDebugHeapText, + "Heap: %d corrupt at %08lx\n", + mDebugHeapId, + reinterpret_cast(currentChunk) + ); return; } @@ -231,12 +261,55 @@ void Mod::updateHeapInfo() ++chunks; } - sprintf(mDebugHeapText, - "Heap: id %d, %.2f/%.2fkb, %d cks\r\n", - mDebugHeapId, - usage / 1024.f, - heap.capacity / 1024.f, - chunks); + sprintf( + mDebugHeapText, + "Heap: id %d, %.2f/%.2fkb, %d cks\n", + mDebugHeapId, + usage / 1024.f, + heap.capacity / 1024.f, + chunks + ); +} + +void Mod::drawConsole() +{ + if (!mConsoleActive) + { + return; + } + + strcat(mDisplayBuffer, "$ "); + strcat(mDisplayBuffer, mCommandBuffer); + strcat(mDisplayBuffer, "\n"); +} + +void Mod::drawMovementInfo() +{ + if (!mShowMovementInfo) + { + return; + } + + char text[64]; + ttyd::mario::Player *player = ttyd::mario::marioGetPtr(); + sprintf( + text, + "Pos: %.2f %.2f %.2f\n" + "SpdY: %.2f\n", + player->playerPosition[0], player->playerPosition[1], player->playerPosition[2], + player->wJumpVelocityY + ); + strcat(mDisplayBuffer, text); +} + +void Mod::drawHeapInfo() +{ + if (mDebugHeapId == -1) + { + return; + } + + strcat(mDisplayBuffer, mDebugHeapText); } } \ No newline at end of file From de7302f2a05f3e16530008594d71e55f8555de37 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Fri, 11 Oct 2019 17:02:09 +0200 Subject: [PATCH 26/90] rel: Revise dispdrv declarations --- ttyd-tools/rel/include/ttyd/dispdrv.h | 12 ++++++------ ttyd-tools/rel/source/mod.cpp | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ttyd-tools/rel/include/ttyd/dispdrv.h b/ttyd-tools/rel/include/ttyd/dispdrv.h index a0f233c4..501749fd 100644 --- a/ttyd-tools/rel/include/ttyd/dispdrv.h +++ b/ttyd-tools/rel/include/ttyd/dispdrv.h @@ -4,7 +4,7 @@ namespace ttyd::dispdrv { -enum class 
DisplayLayer : uint8_t +enum class CameraId : uint8_t { kOffscreen = 0, kOffscreen2, @@ -21,14 +21,14 @@ enum class DisplayLayer : uint8_t kDebug3d, }; -typedef void (*PFN_dispCallback)(DisplayLayer layerId, void *user); +typedef void (*PFN_dispCallback)(CameraId cameraId, void *user); struct DisplayWork { - DisplayLayer layer; + CameraId cameraId; uint8_t renderMode; uint16_t padding_2; - float unk_4; + float order; PFN_dispCallback callback; void *user; } __attribute__((__packed__)); @@ -37,9 +37,9 @@ extern "C" { void dispInit(); void dispReInit(); -void dispEntry(DisplayLayer layerId, uint8_t renderMode, PFN_dispCallback callback, void *user); +void dispEntry(CameraId cameraId, uint8_t renderMode, float order, PFN_dispCallback callback, void *user); void dispSort(); -void dispDraw(DisplayLayer layerId); +void dispDraw(CameraId cameraId); // float dispCalcZ(void *vecUnk); DisplayWork *dispGetCurWork(); diff --git a/ttyd-tools/rel/source/mod.cpp b/ttyd-tools/rel/source/mod.cpp index 86ac5383..094da5ec 100644 --- a/ttyd-tools/rel/source/mod.cpp +++ b/ttyd-tools/rel/source/mod.cpp @@ -53,7 +53,7 @@ void Mod::updateEarly() mKeyboard->update(); // Register draw command - ttyd::dispdrv::dispEntry(ttyd::dispdrv::DisplayLayer::kDebug3d, 1, [](ttyd::dispdrv::DisplayLayer layerId, void *user) + ttyd::dispdrv::dispEntry(ttyd::dispdrv::CameraId::kDebug3d, 1, 0.f, [](ttyd::dispdrv::CameraId layerId, void *user) { reinterpret_cast(user)->draw(); }, this); From a07cacb912ffda2e4b7fe87dccb3f8beab1b25df Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Thu, 17 Oct 2019 23:40:47 +0200 Subject: [PATCH 27/90] Add blender_io_ttyd --- README.md | 16 +- ttyd-tools/blender_io_ttyd/README.md | 66 + .../blender_io_ttyd/io_scene_ttyd/__init__.py | 148 ++ .../io_scene_ttyd/camera_road.py | 499 +++++ .../blender_io_ttyd/io_scene_ttyd/dmd.py | 1730 +++++++++++++++++ .../io_scene_ttyd/export_ttyd.py | 45 + .../blender_io_ttyd/io_scene_ttyd/tpl.py | 492 +++++ .../blender_io_ttyd/io_scene_ttyd/util.py | 163 ++ 8 files changed, 3156 insertions(+), 3 deletions(-) create mode 100644 ttyd-tools/blender_io_ttyd/README.md create mode 100644 ttyd-tools/blender_io_ttyd/io_scene_ttyd/__init__.py create mode 100644 ttyd-tools/blender_io_ttyd/io_scene_ttyd/camera_road.py create mode 100644 ttyd-tools/blender_io_ttyd/io_scene_ttyd/dmd.py create mode 100644 ttyd-tools/blender_io_ttyd/io_scene_ttyd/export_ttyd.py create mode 100644 ttyd-tools/blender_io_ttyd/io_scene_ttyd/tpl.py create mode 100644 ttyd-tools/blender_io_ttyd/io_scene_ttyd/util.py diff --git a/README.md b/README.md index 626eab36..be105196 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,17 @@ +# Tools for Paper Mario: The Thousand Year Door + +## Credits + * Technical assistance and additional reverse engineering by **JasperRLZ** + * Reverse engineering with focus on the battle system by **Jdaster64** + * Tool implementation and reverse engineering by **PistonMiner** + * Debugging assistance and reverse engineering by **Zephiles** + +## Contents This contains various tools built for Paper Mario: The Thousand Year Door, some of which are applicable to other GameCube games as well. 
+ * blender_io_ttyd: Blender exporter for map files + * rel: Framework for writing code on-top of existing GameCube games * docs: Various pieces of documentation - * ttydasm: Disassembler for the event command scripting language - * rellink: REL file linking tool * elf2rel: Convert from ELF file to REL file + * ttydasm: Disassembler for the event command scripting language * gcipack: Pack a file into a GCI file to be loaded off memory card - * rel: Framework for writing code on-top of existing GameCube games \ No newline at end of file + * rellink: REL file linking tool \ No newline at end of file diff --git a/ttyd-tools/blender_io_ttyd/README.md b/ttyd-tools/blender_io_ttyd/README.md new file mode 100644 index 00000000..1d7b8459 --- /dev/null +++ b/ttyd-tools/blender_io_ttyd/README.md @@ -0,0 +1,66 @@ +# Blender exporter for Paper Mario: The Thousand Year Door + +This tool and this documentation is a work-in-progress. + +If after reading this document you have unanswered questions, there is a dedicated [Paper Mario TTYD Modding Discord server](https://discord.gg/geVf9UK) that you can join. + +## Installation +This addon was tested with Blender 2.80. +Copy the folder `io_scene_ttyd` in this directory into the `scripts/addons/` directory of your Blender installation, then enable it under *Edit*, *Preferences...*, *Add-ons*. + +## Playtesting +To actually playtest your map in-game, you can use a tool like GCRebuilder to place the exported map files (`d`, `c`, `t`) in a subfolder of the disc image's `m` folder. +You can either replace an existing map which you can then just go to in the game, or you can put it in as a completely new map. If you elect to go with the second option, you can use e.g. a REL GCI mod to load the map up dynamically, such as the one in the `rel` folder of this repository. + +## Usage +Different Blender collections are used to map objects to different functions in the map and must be selected accordingly when exporting. All collections must be present even if unused. Objects should not be in more than one of these functional collections. + +### Map and Hit +The Map and Hit collections contain render and collision geometry contained in the main data file (`d`). The parent-child hierarchy will be reflected in the exported joint graph, so objects contained should either not be parented or be parented to another object in the same collection. +I recommend you use linked objects (Alt-D by default) to reduce the amount of duplicated objects between Map and Hit collections. + +#### Materials +Blender material node trees will be converted to TTYD materials on a best-effort basis. If a Blender material does not map to a TTYD material, it will not be converted. In order to map to a valid TTYD material, one of the following setups must be used. + +##### Shader setup +The shader output must be connected to either +* a Diffuse Shader if there is no transparency [Example](https://i.imgur.com/Z4h8Nsi.png) +* a Mix Shader of a Transparent Shader and a Diffuse Shader if there is transparency [Example](https://i.imgur.com/AB4ON5f.png) +The Color input of the Diffuse Shader node is the color source, with the Fac input of the Mix Shader being the alpha source if present. +If the material is opaque, then the color input may be left unconnected and set to a constant color, terminating the material. [Example](https://i.imgur.com/aIR6laH.png) + +##### Vertex color setups +If vertex colors are to be used, the color input may be connected to +* an Attribute node mapping to a vertex color. 
In this case, the node tree ends here and the material must be opaque. [Example](https://i.imgur.com/Gq6OI95.png) +* a MixRGB node set to Multiply with it's Fac input set to 1.0, with one of the Color sockets connected to an Attribute node mapping to a vertex color. In this case, the Color socket that is not connected to an Attribute node becomes the new color input. [Example](https://i.imgur.com/gOnshfr.png) +Note that at this time, no alpha channel is supported on vertex colors due to a limitation in Blender 2.80. + +##### TEV setups +The color and alpha inputs must be connected to + * a single sampler setup. +In future, more TEV modes with more complicated multi-material blending functions will be supported here. + +##### Samplers +Every sampler setup in the above TEV setups must end in an Image Texture node. The Image Texture node's Vector input may optionally be connected to a Mapping Node specifying a texture coordinate transform for that sampler. The Vector input of that mapping node may optionally be connected to a UV Map node specifying which UV map to use for that sampler. The Image Texture node's Vector input may also be directly connected to a UV Map node without a Mapping node. [Example](https://i.imgur.com/jHmvPse.png) + +##### Blend mode +The Blend Mode set in the *Settings* panel of the Eevee material settings maps to the TTYD blend mode. Opaque, Alpha-Clip and Alpha-Blend are supported. If the material has transparency, the Blend Mode must be set to either Alpha Blend or Alpha Clip. + +#### Animations +Map object transform and texture coordinate transform animations are currently supported. To export an animation, push all the relevant Blender animation data into Actions and put them into NLA Tracks. All NLA tracks of the same name will be packed into one TTYD animation. + +### Camera +The Camera collection contains the camera marker meshes and curves that will end up in the camera road file (`c`). +A camera curve defines the path the camera should follow in order to track Mario and map to Blender curve objects. There is exactly one curve active at any one time. A curve is considered to be active when any of the markers attached to it is the one Mario is standing in. +Markers are meshes that define "zones" of which curves should be active depending on where Mario stands. The first marker mesh that is hit when projecting Mario's position downwards is considered active. This usually just means that you should put the marker slightly below where Mario should be standing in order to activate it. +Every curve must have at least one marker. To define the markers for a curve, add custom properties on the Blender object (the object data-block, **not** the Curve data-block) named "marker0", "marker1", etc. with the names of the marker objects for that curve. Only "marker0" is required. +TODO: Other curve properties + +## Known limitations +* No support for: + * Lights + * Fog + * Different TEV modes + * Hit attributes + * Different texture formats (partially implemented) + * Material blend alpha (partially implemented) \ No newline at end of file diff --git a/ttyd-tools/blender_io_ttyd/io_scene_ttyd/__init__.py b/ttyd-tools/blender_io_ttyd/io_scene_ttyd/__init__.py new file mode 100644 index 00000000..6b707b3f --- /dev/null +++ b/ttyd-tools/blender_io_ttyd/io_scene_ttyd/__init__.py @@ -0,0 +1,148 @@ +# SPDX-License-Identifier: GPL-3.0-or-later +# Copyright 2019 Linus S. 
(aka PistonMiner) + +import bpy +from bpy.props import ( + StringProperty, + EnumProperty +) +from bpy_extras.io_utils import ( + ExportHelper, + axis_conversion +) + +import os.path + +from . import export_ttyd + +def find_collection_by_name(scene, name): + collections_left = [scene.collection] + while len(collections_left) > 0: + c = collections_left.pop(0) + if c.name == name: + return c + collections_left.extend(c.children) + return None + + +def enum_collections(self, context): + if context is None: + return [] + + # todo-blender_io_ttyd: Rework this + collections = [] + collections_left = [context.scene.collection] + while len(collections_left): + collection = collections_left.pop(0) + child_collections = [x for x in collection.children] + collections_left.extend(child_collections) + collections.extend(child_collections) + + items = [] + for c in collections: + items.append((c.name, c.name, "")) + return items + +def enum_root_objects(self, context): + if context is None: + return [] + + items = [] + for obj in context.scene.objects: + if obj.parent == None: + items.append((obj.name, obj.name, "")) + return items + +def find_object_by_name(scene, name): + for object in scene.objects: + if object.name == name: + return object + return None + +bl_info = { + "name": "Paper Mario TTYD Map Data File", + "author": "PistonMiner (Linus S.)", + "version": (1, 0, 0), + "blender": (2, 80, 0), + "location": "File > Import-Export", + "description": "Import-Export as Paper Mario TTYD Map Data File", + "tracker_url": "https://github.com/PistonMiner/ttyd-tools/issues/", + "support": "COMMUNITY", + "category": "Import-Export", +} + +class ExportTTYDMap(bpy.types.Operator, ExportHelper): + bl_idname = "export_scene.ttyd_map" + bl_label = "Export TTYD map" + bl_options = {'UNDO', 'PRESET'} + + filename_ext = "" + # todo-blender_io_ttyd: The GLTF2 addon still uses = instead of : here; it + # gave a warning for me. Double-check that this + # syntax is right. 
+ filter_glob: StringProperty(default="d;c;t", options={'HIDDEN'}) + + map_root: EnumProperty( + name="Map Collection", + items=enum_collections + ) + hit_root: EnumProperty( + name="Hit Collection", + items=enum_collections + ) + camera_road_root: EnumProperty( + name="Camera Road Collection", + items=enum_collections + ) + + """def draw(self, context): + layout = self.layout + + row = layout.row(align=True) + row.prop(self, "map_collection")""" + + def execute(self, context): + settings = {} + settings['root_path'] = os.path.dirname(self.filepath) + map_root = find_collection_by_name(context.scene, self.map_root) + hit_root = find_collection_by_name(context.scene, self.hit_root) + camera_road_root = find_collection_by_name(context.scene, self.camera_road_root) + + collection_list = [ + map_root, + hit_root, + camera_road_root + ] + if len(collection_list) != len(set(collection_list)): + self.report({'ERROR'}, "Map/Hit/Camera Road collections must not be the same!") + return {'CANCELLED'} + + settings["map_root"] = map_root + settings["hit_root"] = hit_root + settings["camera_road_root"] = camera_road_root + + settings["axis_conversion_matrix"] = axis_conversion( + to_forward='-Z', + to_up='Y' + ).to_4x4() + return export_ttyd.export(context, settings) + + +def menu_func_export_map(self, context): + self.layout.operator(ExportTTYDMap.bl_idname, text="MarioSt Map (d/c/t)") + +classes = ( + ExportTTYDMap, +) + +def register(): + for cls in classes: + bpy.utils.register_class(cls) + + bpy.types.TOPBAR_MT_file_export.append(menu_func_export_map) + +def unregister(): + for cls in reversed(classes): + bpy.utils.unregister_class(cls) + + bpy.types.TOPBAR_MT_file_export.remove(menu_func_export_map) \ No newline at end of file diff --git a/ttyd-tools/blender_io_ttyd/io_scene_ttyd/camera_road.py b/ttyd-tools/blender_io_ttyd/io_scene_ttyd/camera_road.py new file mode 100644 index 00000000..4427c453 --- /dev/null +++ b/ttyd-tools/blender_io_ttyd/io_scene_ttyd/camera_road.py @@ -0,0 +1,499 @@ +# SPDX-License-Identifier: GPL-3.0-or-later +# Copyright 2019 Linus S. 
(aka PistonMiner) + +import math +import datetime + +import bpy +import mathutils + +from .util import * + +class CameraRoadMarker: + def __init__(self): + self.name = "" + self.polygons = [] + self.vertex_indices = [] + self.vertex_positions = [] + + def link(self, linker): + marker_blob_name = "markers:" + str(linker.get_uid()) + marker_data = bytearray(0x68) + + struct.pack_into(">64s", marker_data, 0x00, self.name.encode("shift_jis")) + + # Bounding box + bbox = get_bbox(self.vertex_positions) + assert(bbox != None) + struct.pack_into( + ">ffffff", + marker_data, + 0x40, + bbox[0][0], + bbox[0][1], + bbox[0][2], + bbox[1][0], + bbox[1][1], + bbox[1][2] + ) + + vertex_position_base_index = linker.get_section_blob_count("vertex_positions") + vertex_position_blob_name_prefix = "vertex_positions:{}:".format(linker.get_uid()) + for i, vertex_position in enumerate(self.vertex_positions): + vertex_position_blob_name = vertex_position_blob_name_prefix + str(i) + vertex_position_data = bytearray(0xc) + struct.pack_into( + ">fff", + vertex_position_data, + 0x0, + vertex_position[0], + vertex_position[1], + vertex_position[2] + ) + linker.add_blob(vertex_position_blob_name, vertex_position_data) + linker.place_blob_in_section(vertex_position_blob_name, "vertex_positions") + + struct.pack_into(">L", marker_data, 0x58, vertex_position_base_index) + struct.pack_into(">L", marker_data, 0x5c, len(self.vertex_positions)) + + vertex_index_base_index = linker.get_section_blob_count("vertex_indices") + vertex_index_blob_name_prefix = "indices:{}:".format(linker.get_uid()) + for i, vertex_index in enumerate(self.vertex_indices): + vertex_index_blob_name = vertex_index_blob_name_prefix + str(i) + vertex_index_data = bytearray(0x4) + + struct.pack_into(">L", vertex_index_data, 0x0, vertex_index) + + linker.add_blob(vertex_index_blob_name, vertex_index_data) + linker.place_blob_in_section(vertex_index_blob_name, "vertex_indices") + + polygon_base_index = linker.get_section_blob_count("polygons") + polygon_blob_name_prefix = "polygons:{}:".format(linker.get_uid()) + for i, polygon in enumerate(self.polygons): + polygon_blob_name = polygon_blob_name_prefix + str(i) + polygon_data = bytearray(0x8) + + polygon_start_index = vertex_index_base_index + polygon[0] + struct.pack_into(">L", polygon_data, 0x0, polygon_start_index) + struct.pack_into(">L", polygon_data, 0x4, polygon[1]) + + linker.add_blob(polygon_blob_name, polygon_data) + linker.place_blob_in_section(polygon_blob_name, "polygons") + + struct.pack_into(">L", marker_data, 0x60, polygon_base_index) + struct.pack_into(">L", marker_data, 0x64, len(self.polygons)) + + linker.add_blob(marker_blob_name, marker_data) + linker.place_blob_in_section(marker_blob_name, "markers") + + return marker_blob_name + + @staticmethod + def from_blender_object(blender_object, global_matrix = None): + if blender_object.type != 'MESH': + return None + + marker = CameraRoadMarker() + + marker_vertex_positions = [] + marker_vertex_indices = [] + marker_polygons = [] + + blender_depsgraph = bpy.context.evaluated_depsgraph_get() + blender_evaluated_object = blender_object.evaluated_get(blender_depsgraph) + blender_mesh = blender_evaluated_object.to_mesh() + blender_index_map = {} + + def assign_index_from_blender_index(blender_index): + if blender_index in blender_index_map: + return blender_index_map[blender_index] + + # Not in buffer, add. 
+ new_index = len(marker_vertex_positions) + vertex_position = blender_mesh.vertices[blender_index].co + vertex_position = blender_object.matrix_world @ vertex_position + if global_matrix != None: + vertex_position = global_matrix @ vertex_position + marker_vertex_positions.append(tuple(vertex_position)) + blender_index_map[blender_index] = new_index + return new_index + + # todo-blender_io_ttyd: This shares a lot with + # DmdModel.list_from_blender_mesh, see about extracting some common + # code. + for blender_polygon in blender_mesh.polygons: + loop_indices = [i for i in blender_polygon.loop_indices] + loop_index_polygons = [] + if len(loop_indices) == 3: + loop_index_polygons.append(loop_indices[:]) + elif len(loop_indices) > 3: + polyline_vertices = [] + for loop_index in loop_indices: + loop = blender_mesh.loops[loop_index] + loop_vertex = blender_mesh.vertices[loop.vertex_index] + polyline_vertices.append(loop_vertex.co) + tessellated_triangles = mathutils.geometry.tessellate_polygon([polyline_vertices]) + for tri in tessellated_triangles: + tri_loop_indices = [loop_indices[i] for i in tri] + loop_index_polygons.append(tri) + + for loop_index_polygon in loop_index_polygons: + polygon_start = len(marker_vertex_indices) + polygon_size = 0 + for loop_index in loop_index_polygon: + loop = blender_mesh.loops[loop_index] + blender_vertex_index = loop.vertex_index + vertex_index = assign_index_from_blender_index(blender_vertex_index) + marker_vertex_indices.append(vertex_index) + polygon_size += 1 + + marker_polygons.append((polygon_start, polygon_size)) + + blender_evaluated_object.to_mesh_clear() + + marker.polygons = marker_polygons + marker.vertex_indices = marker_vertex_indices + marker.vertex_positions = marker_vertex_positions + + return marker + +class CameraRoadCurve: + def __init__(self): + self.name = "" + self.points = [] + self.markers = [] + + # Whether the curve should be extended past the clamp start + # (necessary if the player may stand outside of the curve area) + self.extend = True + + # These vary a lot throughout TTYD. I've chosen values based on the + # original maps which seem to me like the default values. + self.should_lock_y = False + self.lock_y_value = 0.0 + self.disabled = False + + self.should_clamp = 1 + self.clamp_distance_left = 20.0 + self.clamp_distance_right = 20.0 + + self.target_distance = 55.0 + self.elevation = 16.0 + self.pitch = -5.0 + + self.shift_x_rate = 20.0 + + def link(self, linker): + curve_blob_name = "curves.{}".format(self.name) + curve_data = bytearray(0xb8) + struct.pack_into(">32s", curve_data, 0x00, self.name.encode("shift_jis")) + struct.pack_into(">L", curve_data, 0x20, self.should_lock_y) # Lock Y? + struct.pack_into(">f", curve_data, 0x24, self.lock_y_value) # Value to lock Y at + + # Maximum distance the camera is allowed to travel left and right of the curve + struct.pack_into(">f", curve_data, 0x48, self.clamp_distance_left) + struct.pack_into(">f", curve_data, 0x4c, self.clamp_distance_right) + + # Distance from camera to target + struct.pack_into(">f", curve_data, 0x58, self.target_distance) + + # Camera elevation/pitch (degrees) + struct.pack_into(">f", curve_data, 0x64, self.elevation) + struct.pack_into(">f", curve_data, 0x70, self.pitch) + + # Shift X rate (how far/fast the camera slides ahead of the player + # when traveling on the X-axis) + struct.pack_into(">f", curve_data, 0x7c, self.shift_x_rate) + + # Enable clamping? 
+ struct.pack_into(">L", curve_data, 0x84, self.should_clamp) + + # Curve data + assert(len(self.points)) + curve_data_name_prefix = "curve_data:{}:".format(linker.get_uid()) + curve_data_base_index = linker.get_section_blob_count("curve_data") + curve_data_count = 0 + points = self.points + + # Bounding box (excludes extended points on purpose) + marker_bboxes = [get_bbox(m.vertex_positions) for m in self.markers] + bbox = merge_bboxes(marker_bboxes) + assert(bbox != None) + struct.pack_into( + ">ffffff", + curve_data, + 0x88, + bbox[0][0], bbox[0][1], bbox[0][2], bbox[1][0], bbox[1][1], bbox[1][2] + ) + + # Calculate progress for points + def point_distance(lhs, rhs): + return math.sqrt(sum([(lhs[i] - rhs[i]) ** 2 for i in range(2)])) + curve_length = 0.0 + point_lengths = [0.0] + for i in range(1, len(points)): + curve_length += point_distance(points[i - 1][0], points[i][0]) + point_lengths.append(curve_length) + point_progresses = [length / curve_length for length in point_lengths] + + # Extend curve along normals at start and end + if self.extend: + def extend_from_point(point, length): + tangent = point[1] + position = tuple([point[0][i] + point[1][i] * length for i in range(2)]) + return (position, tangent) + + # Length looks constant based on original exporter data + extend_length = 10000 + start_point = extend_from_point(points[0], -extend_length) + end_point = extend_from_point(points[-1], extend_length) + + points = [start_point] + points[:] + [end_point] + point_progresses = [-1.0] + point_progresses + [2.0] + + start_clamp_index = 1 + end_clamp_index = len(points) - 2 + else: + start_clamp_index = 0 + end_clamp_index = len(points) - 1 + + # Start/end clamp segment index + struct.pack_into(">L", curve_data, 0x40, start_clamp_index) + struct.pack_into(">L", curve_data, 0x44, end_clamp_index) + + struct.pack_into(">f", curve_data, 0x50, 0.0) # Start clamp progress + struct.pack_into(">f", curve_data, 0x54, 1.0) # End clamp progress + + # Positions, then tangents + for attribute_index in range(2): + for point_index, point in enumerate(points): + curve_data_blob_name = curve_data_name_prefix + str(curve_data_count) + curve_data_data = bytearray(0xc) # Horrendous naming, I know, but it has to be. + + struct.pack_into(">f", curve_data_data, 0x0, point[attribute_index][0]) + + # Add progress in Y-field for tangent + if attribute_index == 1: + struct.pack_into(">f", curve_data_data, 0x4, point_progresses[point_index]) + + struct.pack_into(">f", curve_data_data, 0x8, point[attribute_index][1]) + + linker.add_blob(curve_data_blob_name, curve_data_data) + linker.place_blob_in_section(curve_data_blob_name, "curve_data") + curve_data_count += 1 + + struct.pack_into(">L", curve_data, 0xa0, curve_data_base_index) + struct.pack_into(">L", curve_data, 0xa4, curve_data_count) + + # Markers + marker_base_index = linker.get_section_blob_count("markers") + for marker in self.markers: + marker.link(linker) + struct.pack_into(">L", curve_data, 0xa8, marker_base_index) + struct.pack_into(">L", curve_data, 0xac, len(self.markers)) + + linker.add_blob(curve_blob_name, curve_data) + linker.place_blob_in_section(curve_blob_name, "curves") + + return curve_blob_name + + @staticmethod + def from_blender_object(blender_object, camera_road_collection, global_matrix = None): + if blender_object.type != 'CURVE': + return None + + curve = CameraRoadCurve() + curve.name = blender_object.name + + # Parse user properties + # todo-blender_io_ttyd: This is not the cleanest way to do this. 
+ # Potentially find a better way to do this neatly. + def read_property(property_name, type): + # Read a property only if it exists and is convertible + if property_name not in blender_object: + return + + property_value = blender_object[property_name] + + try: + cast_property_value = type(property_value) + except ValueError: + return + + setattr(curve, property_name, cast_property_value) + + read_property("extend", bool) + + read_property("should_lock_y", bool) + read_property("lock_y_value", float) + read_property("disabled", bool) + + read_property("should_clamp", bool) + read_property("clamp_distance_left", float) + read_property("clamp_distance_right", float) + + read_property("target_distance", float) + read_property("elevation", float) + read_property("pitch", float) + read_property("shift_x_rate", float) + + blender_curve = blender_object.data + + assert(len(blender_curve.splines) == 1) + spline = blender_curve.splines[0] + + # Calculate point positions and tangents + curve.points = [] + assert(len(spline.points) > 1) + for i in range(len(spline.points)): + # Figure out surrounding points for tangent + current_point = spline.points[i] + if i > 0: + previous_point = spline.points[i - 1] + else: + previous_point = None + if i < len(spline.points) - 1: + next_point = spline.points[i + 1] + else: + next_point = None + + # Calculate tangent + if previous_point == None: + assert(next_point != None) + tangent = next_point.co - current_point.co + elif next_point == None: + assert(previous_point != None) + tangent = current_point.co - previous_point.co + else: + tangent_in = current_point.co - previous_point.co + tangent_out = next_point.co - current_point.co + tangent = tangent_in * 0.5 + tangent_out * 0.5 + + # Finalize data + position = current_point.co + position = blender_object.matrix_world @ position + tangent = blender_object.matrix_world @ tangent + if global_matrix != None: + position = global_matrix @ position + tangent = global_matrix @ tangent + + # Project onto XZ-plane + position[1] = 0.0 + tangent[1] = 0.0 + tangent = tangent.normalized() + + position_2d = (position[0], position[2]) + tangent_2d = (tangent[0], tangent[2]) + curve.points.append((position_2d, tangent_2d)) + + # Markers + curve.markers = [] + marker_index = 0 + while True: + marker_property_name = "marker{}".format(marker_index) + if not marker_property_name in blender_object: + break + + marker_name = blender_object[marker_property_name] + assert(isinstance(marker_name, str)) + assert(marker_name in camera_road_collection.all_objects) + marker_object = camera_road_collection.all_objects[marker_name] + marker = CameraRoadMarker.from_blender_object(marker_object, global_matrix) + curve.markers.append(marker) + + marker_index += 1 + + assert(len(curve.markers) > 0) + + return curve + +class CameraRoadFile: + def __init__(self): + self.curves = [] + pass + + @staticmethod + def from_blender_scene(blender_scene, settings): + file = CameraRoadFile() + + if "axis_conversion_matrix" in settings: + global_matrix = settings["axis_conversion_matrix"] + else: + global_matrix = None + + file.curves = [] + for object in settings["camera_road_root"].all_objects: + if object.type != "CURVE": + continue + curve = CameraRoadCurve.from_blender_object( + object, + settings["camera_road_root"], + global_matrix + ) + file.curves.append(curve) + + return file + + def serialize(self): + linker = Linker() + + header_blob_name = "header" + header_data = bytearray(0x10c) + struct.pack_into(">64s", header_data, 0x004, 
"MarioSt_CameraRoadExport".encode()) + struct.pack_into(">64s", header_data, 0x044, "1.01".encode()) # version + date_text = datetime.datetime.utcnow().strftime("%Y/%m/%d") + struct.pack_into(">64s", header_data, 0x084, date_text.encode()) + + camera_parameter_blob_name = "camera_parameters" + camera_parameter_data = bytearray(0xc) + # fov/near/far + # These values don't change the actual camera parameters, but at least + # the FoV is used in camera shift calculations to figure out how much + # to shift the camera, so these should be correct. TTYD uses 25 degree + # FoV. + struct.pack_into(">f", camera_parameter_data, 0x0, 25.0) + struct.pack_into(">f", camera_parameter_data, 0x4, 0.01) + struct.pack_into(">f", camera_parameter_data, 0x8, 1000.0) + linker.add_blob(camera_parameter_blob_name, camera_parameter_data) + linker.place_blob_in_section(camera_parameter_blob_name, "camera_parameters") + + for curve in self.curves: + curve_blob_name = curve.link(linker) + + # Place sections apart from header now so addresses are available + linker.place_section_at("camera_parameters", len(header_data)) + linker.place_section("curves") + linker.place_section("markers") + linker.place_section("polygons") + linker.place_section("curve_data") + linker.place_section("vertex_positions") + linker.place_section("vertex_indices") + + # Finish header + header_data_section_names = [ + "camera_parameters", + "curves", + "markers", + "polygons", + "curve_data", + "vertex_positions", + "vertex_indices" + ] + for i, section_name in enumerate(header_data_section_names): + entry_count = linker.get_section_blob_count(section_name) + struct.pack_into(">L", header_data, 0xc4 + i * 4, entry_count) + section_address = linker.get_section_address(section_name) + struct.pack_into(">L", header_data, 0xe8 + i * 4, section_address) + + linker.add_blob(header_blob_name, header_data) + linker.place_blob_in_section(header_blob_name, "header") + + # Finalize data + linker.place_section_at("header", 0x0) + assert(linker.resolve_relocations()) + linked_data = linker.serialize() + + # Fill size field + struct.pack_into(">L", linked_data, 0x0, len(linked_data)) + + return linked_data \ No newline at end of file diff --git a/ttyd-tools/blender_io_ttyd/io_scene_ttyd/dmd.py b/ttyd-tools/blender_io_ttyd/io_scene_ttyd/dmd.py new file mode 100644 index 00000000..efb94fbe --- /dev/null +++ b/ttyd-tools/blender_io_ttyd/io_scene_ttyd/dmd.py @@ -0,0 +1,1730 @@ +# SPDX-License-Identifier: GPL-3.0-or-later +# Copyright 2019 Linus S. 
(aka PistonMiner) + +from collections import defaultdict +import struct +import math +import datetime + +import bpy +import mathutils + +from .util import * + +def linear_to_srgb(linear): + # Linear to sRGB conversion excluding alpha channel if present + out_components = [] + for linear_component in linear[:3]: + if linear_component <= 0.0031308: + component = 12.92 * linear_component + else: + component = 1.055 * linear_component ** (1/2.4) - 0.055 + out_components.append(component) + out_components += linear[3:] + return tuple(out_components) + +class DmdLinker(Linker): + def add_string(self, source_name, source_offset, text, section_name = "strings"): + # todo-blender_io_ttyd: Investigate effects of string deduplication + blob_name = "{}:{}".format(section_name, self.get_uid()) + encoded_string = text.encode("shift_jis") + b"\x00" + self.add_blob(blob_name, encoded_string) + self.place_blob_in_section(blob_name, section_name) + self.add_relocation(source_name, source_offset, blob_name) + +# DMD vertex attribute IDs +VERTEX_ATTRIBUTE_POSITION_ID = "position" +VERTEX_ATTRIBUTE_NORMAL_ID = "normal" +VERTEX_ATTRIBUTE_TEXCOORD_ID_PREFIX = "texcoord" +VERTEX_ATTRIBUTE_COLOR_ID_PREFIX = "color" + +VERTEX_ATTRIBUTE_TEXCOORD_MAX_COUNT = 8 +VERTEX_ATTRIBUTE_COLOR_MAX_COUNT = 2 # Technically two but color buffer 1 support is broken + +# Order in which indices are packed into the individual vertices +VERTEX_ATTRIBUTE_INDEX_ORDER = [] +VERTEX_ATTRIBUTE_INDEX_ORDER.append(VERTEX_ATTRIBUTE_POSITION_ID) +VERTEX_ATTRIBUTE_INDEX_ORDER.append(VERTEX_ATTRIBUTE_NORMAL_ID) +for i in range(VERTEX_ATTRIBUTE_COLOR_MAX_COUNT): + VERTEX_ATTRIBUTE_INDEX_ORDER.append(VERTEX_ATTRIBUTE_COLOR_ID_PREFIX + str(i)) +for i in range(VERTEX_ATTRIBUTE_TEXCOORD_MAX_COUNT): + VERTEX_ATTRIBUTE_INDEX_ORDER.append(VERTEX_ATTRIBUTE_TEXCOORD_ID_PREFIX + str(i)) + +# Order in which the data referenced by the indices is packed into the data sections +VERTEX_ATTRIBUTE_DATA_ORDER = [] +VERTEX_ATTRIBUTE_DATA_ORDER.append(VERTEX_ATTRIBUTE_POSITION_ID) +VERTEX_ATTRIBUTE_DATA_ORDER.append(VERTEX_ATTRIBUTE_NORMAL_ID) +for i in range(VERTEX_ATTRIBUTE_TEXCOORD_MAX_COUNT): + VERTEX_ATTRIBUTE_DATA_ORDER.append(VERTEX_ATTRIBUTE_TEXCOORD_ID_PREFIX + str(i)) +for i in range(VERTEX_ATTRIBUTE_COLOR_MAX_COUNT): + VERTEX_ATTRIBUTE_DATA_ORDER.append(VERTEX_ATTRIBUTE_COLOR_ID_PREFIX + str(i)) + +class DmdTexture: + """DMD Texture reference. Does not store pixel data.""" + def __init__(self): + self.name = "" + self.size = (0, 0) + + def link(self, linker): + texture_blob_name = "textures:" + self.name + texture_data = bytearray(0x10) + linker.add_string(texture_blob_name, 0x0, self.name) + + # Everything past here is actually irrelevant because the game fills it + # in with data from the TPL in mapBuildTexture, but the original exporter + # puts accurate data here that gets overwritten later, so we will as well. 
+ struct.pack_into(">L", texture_data, 0x0, 0) # image format + struct.pack_into(">HH", texture_data, 0x8, self.size[0], self.size[1]) # width/height + + linker.add_blob(texture_blob_name, texture_data) + linker.place_blob_in_section(texture_blob_name, "texture_data") + + @staticmethod + def from_blender_image(blender_image): + texture = DmdTexture() + texture.name = blender_image.name + texture.size = tuple(blender_image.size) + return texture + +class DmdMaterial: + def __init__(self): + self.name = "" + + self.use_fixed_color = True + self.fixed_color = (0.8, 0.8, 0.8, 1.0) + # Vertex color layers: Builtin TEV modes only use one vertex color and + # in fact support vertex color 1 is broken in retail TTYD, however mods + # may want to make use of more than one vertex color, so we support + # them in principle. + self.color_layers = [] + + self.samplers = [] + + self.blend_mode = 0 + self.tev_mode = 0 + + def get_referenced_vertex_attributes(self): + attributes = [ + VERTEX_ATTRIBUTE_POSITION_ID, + VERTEX_ATTRIBUTE_NORMAL_ID + ] + for i in range(len(self.color_layers)): + attributes.append(VERTEX_ATTRIBUTE_COLOR_ID_PREFIX + str(i)) + for i in range(len(self.samplers)): + attributes.append(VERTEX_ATTRIBUTE_TEXCOORD_ID_PREFIX + str(i)) + return attributes + + def get_uv_layer_names(self): + layers = [] + for sampler in self.samplers: + layers.append(sampler["uv_layer"]) + return layers + + def get_color_layer_names(self): + return self.color_layers + + def link(self, linker): + material_blob_name = "materials:" + self.name + material_data = bytearray(0x114) + linker.add_string(material_blob_name, 0x000, self.name) + struct.pack_into( + ">BBBB", + material_data, + 0x004, + int(self.fixed_color[0] * 255), + int(self.fixed_color[1] * 255), + int(self.fixed_color[2] * 255), + int(self.fixed_color[3] * 255) + ) + struct.pack_into(">B", material_data, 0x008, 0 if self.use_fixed_color else 1) + struct.pack_into(">B", material_data, 0x00a, self.blend_mode) # blend mode + struct.pack_into(">B", material_data, 0x00b, len(self.samplers)) # texture count + + # Serialize samplers + for i, sampler in enumerate(self.samplers): + # External sampler data + sampler_blob_name = "samplers:" + str(linker.get_uid()) + sampler_data = bytearray(0xc) + + texture_blob_name = "textures:" + sampler["texture_name"] + linker.add_relocation(sampler_blob_name, 0x0, texture_blob_name) + + struct.pack_into(">BB", sampler_data, 0x8, sampler["wrapS"], sampler["wrapT"]) + + linker.add_blob(sampler_blob_name, sampler_data) + linker.place_blob_in_section(sampler_blob_name, "sampler_data") + + linker.add_relocation(material_blob_name, 0x00c + i * 4, sampler_blob_name) + + # Material-internal transform data + struct.pack_into( + ">ff", + material_data, + 0x02c + i * 0x1c + 0x00, + sampler["translation"][0], + sampler["translation"][1] + ) + struct.pack_into( + ">ff", + material_data, + 0x02c + i * 0x1c + 0x08, + sampler["scale"][0], + sampler["scale"][1] + ) + struct.pack_into( + ">f", + material_data, + 0x02c + i * 0x1c + 0x10, + sampler["rotation"] + ) + + # Fill in default texture coordinate transforms + for i in range(len(self.samplers), 8): + struct.pack_into(">f", material_data, 0x02c + i * 0x1c + 0x08, 1.0) # scale X + struct.pack_into(">f", material_data, 0x02c + i * 0x1c + 0x0c, 1.0) # scale Y + + # blend alpha modulation in red channel, not sure how to integrate into model + struct.pack_into(">L", material_data, 0x10c, 0xffffffff) + # todo-blender_io_ttyd: Investigate additional fields of TEV configuration structure + 
tev_config_blob_name = "tev_configs:" + self.name + tev_config_data = bytearray(0xc) + struct.pack_into(">L", tev_config_data, 0x00, self.tev_mode) + linker.add_blob(tev_config_blob_name, tev_config_data) + linker.place_blob_in_section(tev_config_blob_name, "tev_configs") + + linker.add_relocation(material_blob_name, 0x110, tev_config_blob_name) + + linker.add_blob(material_blob_name, material_data) + linker.place_blob_in_section(material_blob_name, "materials") + return material_blob_name + + @staticmethod + def from_blender_material(blender_material): + material = DmdMaterial() + material.name = blender_material.name + + if not blender_material.use_nodes: + print("io_scene_ttyd: Material {} has no node tree!".format(blender_material.name)) + return material + + # Find active output node + nodes = blender_material.node_tree.nodes + output_node = None + for n in nodes: + if not isinstance(n, bpy.types.ShaderNodeOutputMaterial): + continue + if not n.is_active_output: + continue + output_node = n + break + + if not output_node: + print("io_scene_ttyd: Material {} has no output node!".format(blender_material.name)) + return material + + def get_node_input_source(node, input_id): + if isinstance(input_id, str): + if not input_id in node.inputs: + return None + else: + if input_id >= len(node.inputs): + return None + + links = node.inputs[input_id].links + if not len(links): + return None + + return links[0].from_socket + + shader_source = get_node_input_source(output_node, "Surface") + assert(shader_source != None) + shader_node = shader_source.node + + # Handle transparent mix case + # todo-blender_io_ttyd: This is a very crude way to handle this case. + # Rethink and potentially redo this. + allow_transparency = False + if isinstance(shader_node, bpy.types.ShaderNodeMixShader): + # todo-blender_io_ttyd: Verify that the order of ShaderNode.inputs + # is guaranteed to be like this. 
+ alpha_socket = get_node_input_source(shader_node, 0) + transparency_shader_socket = get_node_input_source(shader_node, 1) + color_shader_socket = get_node_input_source(shader_node, 2) + if (alpha_socket != None + and color_shader_socket != None + and isinstance(color_shader_socket.node, bpy.types.ShaderNodeBsdfDiffuse) + and transparency_shader_socket != None + and isinstance(transparency_shader_socket.node, bpy.types.ShaderNodeBsdfTransparent)): + color_shader_node = color_shader_socket.node + alpha_node = alpha_socket.node + allow_transparency = True + if not allow_transparency: + color_shader_node = shader_node + alpha_socket = None + + if isinstance(color_shader_node, bpy.types.ShaderNodeBsdfDiffuse): + color_socket_name = "Color" + else: + # No known shader + return material + + # Blend mode + if not allow_transparency: + material.blend_mode = 0 + elif blender_material.blend_method == 'OPAQUE': + material.blend_mode = 0 + elif blender_material.blend_method == 'CLIP': + # No blend, fixed alpha cutoff at 128 + material.blend_mode = 1 + elif blender_material.blend_method == 'BLEND': + material.blend_mode = 2 + + color_socket = color_shader_node.inputs[color_socket_name] + if not color_socket.is_linked: + # Fixed color + assert(isinstance(color_socket, bpy.types.NodeSocketColor)) + material.use_fixed_color = True + material.fixed_color = linear_to_srgb(color_socket.default_value) + return material + + def get_sampler_from_tex_node(node): + assert(isinstance(node, bpy.types.ShaderNodeTexImage)) + + sampler = {} + if not node.image: + return None + + sampler["texture_name"] = node.image.name + assert(node.extension != 'CLIP') + blender_extension_to_wrap_mode = { + 'REPEAT': 1, + 'EXTEND': 0, + } + dmd_wrap_mode = blender_extension_to_wrap_mode[node.extension] + sampler["wrapS"] = dmd_wrap_mode + sampler["wrapT"] = dmd_wrap_mode + + # Texture coordinate transform + sampler["translation"] = (0.0, 0.0) + sampler["rotation"] = 0.0 + sampler["scale"] = (1.0, 1.0) + + vector_source = get_node_input_source(node, "Vector") + if vector_source: + vector_node = vector_source.node + if isinstance(vector_node, bpy.types.ShaderNodeMapping): + assert(vector_node.vector_type == 'POINT') + sampler["translation"] = tuple(vector_node.translation[0:2]) + sampler["rotation"] = vector_node.rotation[2] + sampler["scale"] = tuple(vector_node.scale[0:2]) + # Keep track of the mapping node in order to map UV animations + # back to the right sampler + sampler["mapping_node"] = vector_node.name + uv_node = get_node_input_source(vector_node, "Vector").node + elif isinstance(vector_node, bpy.types.ShaderNodeUVMap): + uv_node = vector_node + else: + uv_node = None + + if uv_node: + sampler["uv_layer"] = uv_node.uv_map + else: + # Default UV parameters + sampler["translation"] = (0.0, 0.0) + sampler["rotation"] = 0.0 + sampler["scale"] = (1.0, 1.0) + + if "uv_layer" not in sampler: + sampler["uv_layer"] = "UVMap" + return sampler + + color_source = get_node_input_source(color_shader_node, "Color") + if color_source == None: + print("io_scene_ttyd: Material {} has no color node!".format(blender_material.name)) + return material + color_node = color_source.node + + # Parse vertex color if it exists, either as direct input or as multiply + + # todo-blender_io_ttyd: Implement vertex color alpha support + # todo-blender_io_ttyd: Implement vertex color multi-layer blend support + + # Note that we do not support vertex color alpha or more complex vertex + # color setups at this point. 
First-class support for alpha channels in + # vertex color layers is apparently a planned feature for Blender (you + # already can paint them, just not access in a material graph), so I + # will put off proper implementation of vertex colors until that time + # (expected possibly in Blender 2.81). + + if isinstance(color_node, bpy.types.ShaderNodeAttribute): + # Direct vertex color + material.color_layers = [color_node.attribute_name] + material.use_fixed_color = False + return material + elif isinstance(color_node, bpy.types.ShaderNodeMixRGB): + # Potentially: Multiplied vertex color over TEV setup + if (color_node.blend_type == "MULTIPLY" + and not color_node.inputs["Fac"].is_linked + and color_node.inputs["Fac"].default_value == 1.0): + + color1_source = get_node_input_source(color_node, "Color1") + color2_source = get_node_input_source(color_node, "Color2") + if (color1_source != None + and isinstance(color1_source.node, bpy.types.ShaderNodeAttribute)): + material.color_layers = [color1_source.node.attribute_name] + color_node = color2_source.node + elif (color2_source != None + and isinstance(color2_source.node, bpy.types.ShaderNodeAttribute)): + material.color_layers = [color2_source.node.attribute_name] + color_node = color1_source.node + if len(material.color_layers): + material.use_fixed_color = False + + if isinstance(color_node, bpy.types.ShaderNodeTexImage): + sampler = get_sampler_from_tex_node(color_node) + material.samplers.append(sampler) + material.fixed_color = (1.0, 1.0, 1.0, 1.0) + return material + + # todo-blender_io_ttyd: Implement additional TEV modes + + return material + +class DmdVcdTable: + def __init__(self): + self.attribute_data = defaultdict(list) + + def store_attribute_data(self, attribute, data): + # Try to find an existing instance of the data + stored_data = self.attribute_data[attribute] + for i in range(len(stored_data)): + if stored_data[i] == data: + return i + + # Did not find existing instance, add. + stored_data.append(data) + out_index = len(stored_data) - 1 + assert(out_index < 65536) # Max encodable index + return out_index + + def link(self, linker): + # Figure out quantizations + quantizations = {} + for attribute_name in self.attribute_data: + # Color is unquantized + if attribute_name.startswith(VERTEX_ATTRIBUTE_COLOR_ID_PREFIX): + continue + # Normals are quantized at fixed scale + if attribute_name == VERTEX_ATTRIBUTE_NORMAL_ID: + continue + most_extreme_value = 0.0 + for entry in self.attribute_data[attribute_name]: + for value in entry: + most_extreme_value = max(abs(value), most_extreme_value) + + # Due to the fact that the positive maximum of a quantized value is + # one less than the optimal power of two, we have to add this bias + # in order to choose the next lower quantization in this case. 
+ most_extreme_value += 1.0 + + if most_extreme_value == 0.0: + # Corner case which would make math.log throw an exception + max_magnitude = 0 + else: + max_magnitude = math.ceil(math.log2(most_extreme_value)) + best_quantization = -(max_magnitude - 15) + quantizations[attribute_name] = best_quantization + + # Serialize data in correct order + for attribute_name in VERTEX_ATTRIBUTE_DATA_ORDER: + if attribute_name not in self.attribute_data: + continue + + attribute_blob_name = "vertex_attribute_data:" + attribute_name + + unsigned = False + if attribute_name.startswith(VERTEX_ATTRIBUTE_COLOR_ID_PREFIX): + element_width = 1 + element_count = 4 + quantization = 0 + unsigned = True + elif attribute_name == VERTEX_ATTRIBUTE_NORMAL_ID: + element_width = 1 + element_count = 3 + quantization = 6 + elif attribute_name == VERTEX_ATTRIBUTE_POSITION_ID: + element_width = 2 + element_count = 3 + quantization = quantizations[attribute_name] + elif attribute_name.startswith(VERTEX_ATTRIBUTE_TEXCOORD_ID_PREFIX): + element_width = 2 + element_count = 2 + quantization = quantizations[attribute_name] + else: + assert(False) + + # Pack data into buffer + unquantized_data = self.attribute_data[attribute_name] + + quantized_stride = element_width * element_count + attribute_buffer_size = 4 + quantized_stride * len(unquantized_data) + attribute_buffer_size = align_up(attribute_buffer_size, 32) + attribute_buffer = bytearray(attribute_buffer_size) + struct.pack_into(">L", attribute_buffer, 0x0, len(unquantized_data)) + + for data_index, data in enumerate(unquantized_data): + data_offset = 4 + quantized_stride * data_index + for element_index, element in enumerate(data): + element_offset = data_offset + element_width * element_index + + # Avoid unnecessary handling of unquantized data + if quantization != 0: + quantized_element = int(round(element * 2.0**quantization)) + #print("DmdVcdTable: Quantizing {}: {} -> {} (factor {})".format(attribute_name, element, quantized_element, 2.0**quantization)) + else: + quantized_element = element + + # Check for over/underflow + if quantization != 0: + max_quantized_magnitude = 2**(8 * element_width - 1) + if (quantized_element >= max_quantized_magnitude + or quantized_element < -max_quantized_magnitude): + print("DmdVcdTable: Unable to quantize {} value {}".format(attribute_name, element)) + + # Get right format string + if element_width == 1: + format_string = ">B" if unsigned else ">b" + elif element_width == 2: + assert(not unsigned) + format_string = ">h" + else: + assert(False) + + # Write the actual data + struct.pack_into( + format_string, + attribute_buffer, + element_offset, + quantized_element + ) + linker.add_blob(attribute_blob_name, attribute_buffer) + linker.place_blob_in_section(attribute_blob_name, "vertex_attribute_data") + + # Finally create VCD table + vcd_table_blob_name = "vcd_table" + vcd_table_data = bytearray(0x68) + + # todo-blender_io_ttyd: Clean up the variable attribute count tracking here. 
+ color_count = 0 + tc_count = 0 + for attribute_name in self.attribute_data: + attribute_blob_name = "vertex_attribute_data:" + attribute_name + + if attribute_name == VERTEX_ATTRIBUTE_POSITION_ID: + struct.pack_into(">l", vcd_table_data, 0x44, quantizations[attribute_name]) + linker.add_relocation(vcd_table_blob_name, 0x00, attribute_blob_name) + elif attribute_name == VERTEX_ATTRIBUTE_NORMAL_ID: + linker.add_relocation(vcd_table_blob_name, 0x04, attribute_blob_name) + elif attribute_name.startswith(VERTEX_ATTRIBUTE_COLOR_ID_PREFIX): + color_index = int(attribute_name[len(VERTEX_ATTRIBUTE_COLOR_ID_PREFIX):]) + linker.add_relocation( + vcd_table_blob_name, + 0x0c + color_index * 4, + attribute_blob_name + ) + color_count += 1 + elif attribute_name.startswith(VERTEX_ATTRIBUTE_TEXCOORD_ID_PREFIX): + tc_index = int(attribute_name[len(VERTEX_ATTRIBUTE_TEXCOORD_ID_PREFIX):]) + tc_offset = tc_index * 4 + linker.add_relocation( + vcd_table_blob_name, + 0x18 + tc_offset, + attribute_blob_name + ) + struct.pack_into( + ">l", + vcd_table_data, + 0x48 + tc_offset, + quantizations[attribute_name] + ) + tc_count += 1 + + # Store color and texture coordinate counts + struct.pack_into(">L", vcd_table_data, 0x08, color_count) + struct.pack_into(">L", vcd_table_data, 0x14, tc_count) + + linker.add_blob(vcd_table_blob_name, vcd_table_data) + linker.place_blob_in_section(vcd_table_blob_name, "vcd_table") + return vcd_table_blob_name + +class DmdModel: + """DMD File Model with one material consisting of triangle strips""" + + def __init__(self): + self.material_name = "" + self.attributes = [] + self.polygons = [] + + def get_bbox(self): + # todo-blender_io_ttyd: Use util.get_bbox() instead + first_vertex = True + for p in self.polygons: + for v in p: + position = v[VERTEX_ATTRIBUTE_POSITION_ID] + if first_vertex: + first_vertex = False + bbox_min = list(position) + bbox_max = list(position) + continue + for i in range(3): + bbox_min[i] = min(bbox_min[i], position[i]) + bbox_max[i] = max(bbox_max[i], position[i]) + if first_vertex: + return None + return (tuple(bbox_min), tuple(bbox_max)) + + def link(self, linker, vcd_table): + # Write vertex data into VCD table and get final attribute indices + out_polygons = [] + for p in self.polygons: + out_vertices = [] + for v in p: + out_attribute_indices = [] + for attribute_name in VERTEX_ATTRIBUTE_INDEX_ORDER: + if attribute_name not in v: + continue + index = vcd_table.store_attribute_data(attribute_name, v[attribute_name]) + out_attribute_indices.append(index) + out_vertices.append(out_attribute_indices) + out_polygons.append(out_vertices) + + mesh_blob_name = "meshs:" + str(linker.get_uid()) + + # Align mesh size upwards to 32 bytes to maintain alignment + # with Polygons (containing display lists) + mesh_data_size = align_up(0x10 + 8 * len(out_polygons), 32) + mesh_data = bytearray(mesh_data_size) + struct.pack_into(">B", mesh_data, 0x0, 1) # Unknown + struct.pack_into(">B", mesh_data, 0x3, 1) # bPolygonsAreDisplayLists, always 1 in v1.02 + struct.pack_into(">L", mesh_data, 0x4, len(out_polygons)) # Polygon count + + # Build element mask + element_mask = 0 + for i, attribute in enumerate(VERTEX_ATTRIBUTE_INDEX_ORDER): + if attribute in self.attributes: + element_mask |= (1 << i) + struct.pack_into(">L", mesh_data, 0x8, element_mask) + + linker.add_relocation(mesh_blob_name, 0xc, "vcd_table") + + # Calculate stride + vertex_stride = 0 + for attribute in self.attributes: + if attribute in VERTEX_ATTRIBUTE_INDEX_ORDER: + vertex_stride += 2 + + for polygon_index, 
polygon in enumerate(out_polygons): + polygon_blob_name = mesh_blob_name + ":polygons:" + str(polygon_index) + + # Calculate aligned size + polygon_data_size = align_up(3 + len(polygon) * vertex_stride, 32) + polygon_data = bytearray(polygon_data_size) + struct.pack_into(">B", polygon_data, 0x0, 0x98) # display list leading draw opcode + struct.pack_into(">H", polygon_data, 0x1, len(polygon)) # vertex count + + for vertex_index, vertex in enumerate(polygon): + vertex_offset = 0x3 + vertex_index * vertex_stride + for attribute_index, attribute in enumerate(vertex): + # Fixed 16-bit indices + attribute_offset = vertex_offset + attribute_index * 2 + struct.pack_into(">H", polygon_data, attribute_offset, attribute) + + # Place Polygon data in same section as mesh data. This is + # emulating original exporter behavior, however it may not + # best choice for size and performance. + linker.add_blob(polygon_blob_name, polygon_data) + linker.place_blob_in_section(polygon_blob_name, "meshs") + + linker.add_relocation(mesh_blob_name, 0x10 + 8 * polygon_index, polygon_blob_name) + struct.pack_into(">L", mesh_data, 0x14 + 8 * polygon_index, polygon_data_size) + + linker.add_blob(mesh_blob_name, mesh_data) + linker.place_blob_in_section(mesh_blob_name, "meshs") + return mesh_blob_name + + @staticmethod + def list_from_blender_mesh(blender_mesh, materials): + material_data = {} + for blender_polygon in blender_mesh.polygons: + # Get appropriate material if this mesh has any and we're not doing collision + dmd_material = None + if materials != None and len(blender_mesh.materials) > 0: + blender_material = blender_mesh.materials[blender_polygon.material_index] + for m in materials: + if m.name == blender_material.name: + dmd_material = m + break + # Create new material if no existing one + if dmd_material == None: + dmd_material = DmdMaterial.from_blender_material(blender_material) + materials.append(dmd_material) + + attributes_to_store = dmd_material.get_referenced_vertex_attributes() + blender_uv_layers = dmd_material.get_uv_layer_names() + blender_color_layers = dmd_material.get_color_layer_names() + material_name = dmd_material.name + else: + # No material, store info necessary for collision detection + # todo-blender_io_ttyd: Is normal necessary for pure hit meshes? + attributes_to_store = [ + VERTEX_ATTRIBUTE_POSITION_ID, + VERTEX_ATTRIBUTE_NORMAL_ID + ] + blender_uv_layers = [] + blender_color_layers = [] + material_name = "" + + # Setup per-material polygon data + if material_name not in material_data: + material_data[material_name] = { + "polygons": [], + "attributes": attributes_to_store + } + + # Tessellate polygons into triangles + loop_indices = [i for i in blender_polygon.loop_indices] + polygon_tessellated_loop_indices = [] + if len(loop_indices) == 3: + polygon_tessellated_loop_indices.append(loop_indices[:]) + elif len(loop_indices) > 3: + # todo-blender_io_ttyd: Tessellate polygons into strips instead + # of outputting individual triangles + vertex_positions = [] + for loop_index in loop_indices: + loop = blender_mesh.loops[loop_index] + vertex = blender_mesh.vertices[loop.vertex_index] + vertex_positions.append(vertex.co) + tessellated_triangles = mathutils.geometry.tessellate_polygon([vertex_positions]) + for tri in tessellated_triangles: + # todo-blender_io_ttyd: Should blender_polygon.loop_indices + # not be simply loop_indices here? 
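# A minimal sketch of the tessellation step above, runnable inside Blender:
# mathutils.geometry.tessellate_polygon() takes a list of point sequences and
# returns triangles as 3-tuples of indices into the concatenated point list,
# which are then mapped back to the face's loop indices. The quad and the
# loop indices below are made-up placeholder values.
import mathutils

quad = [
    mathutils.Vector((0.0, 0.0, 0.0)),
    mathutils.Vector((1.0, 0.0, 0.0)),
    mathutils.Vector((1.0, 1.0, 0.0)),
    mathutils.Vector((0.0, 1.0, 0.0)),
]
quad_loop_indices = [10, 11, 12, 13]

for tri in mathutils.geometry.tessellate_polygon([quad]):
    # Two triangles covering the quad, expressed as loop indices.
    print([quad_loop_indices[i] for i in tri])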
+ tri_loop_indices = [blender_polygon.loop_indices[i] for i in tri] + polygon_tessellated_loop_indices.append(tri_loop_indices) + + # Extract attributes + # We store the raw attribute data; the data is deduplicated when + # linking in the VCD table + + # Evaluate face normal only once and share index if not smooth shaded + if (VERTEX_ATTRIBUTE_NORMAL_ID in attributes_to_store + and not blender_polygon.use_smooth): + face_normal = tuple(blender_polygon.normal) + + # todo-blender_io_ttyd: Think about removing attribute names from + # vertices here since they're already stored in the model's field + for triangle_loop_indices in polygon_tessellated_loop_indices: + vertices = [] + for loop_index in triangle_loop_indices: + vertex_attributes = {} + loop = blender_mesh.loops[loop_index] + vertex = blender_mesh.vertices[loop.vertex_index] + assert(VERTEX_ATTRIBUTE_POSITION_ID in attributes_to_store) + # todo-blender_io_ttyd: Think about flattening this loop + # into a series of if-statements with loops for + # colors/texcoords + for attribute_name in attributes_to_store: + if attribute_name == VERTEX_ATTRIBUTE_POSITION_ID: + vertex_attributes[attribute_name] = tuple(vertex.co) + elif attribute_name == VERTEX_ATTRIBUTE_NORMAL_ID: + if blender_polygon.use_smooth: + normal_data = tuple(vertex.normal) + else: + normal_data = face_normal + + vertex_attributes[attribute_name] = normal_data + elif attribute_name.startswith(VERTEX_ATTRIBUTE_TEXCOORD_ID_PREFIX): + tc_index = int(attribute_name[len(VERTEX_ATTRIBUTE_TEXCOORD_ID_PREFIX):]) + assert(tc_index < VERTEX_ATTRIBUTE_TEXCOORD_MAX_COUNT) + + if tc_index < len(blender_uv_layers): + tc_layer_name = blender_uv_layers[tc_index] + tc_layer = blender_mesh.uv_layers[tc_layer_name] + tc_data = tuple(tc_layer.data[loop_index].uv) + else: + # todo-blender_io_ttyd: Figure out if this is a + # fatal error; probably should be. + assert(False) + tc_data = (0.0, 0.0) + + vertex_attributes[attribute_name] = tc_data + elif attribute_name.startswith(VERTEX_ATTRIBUTE_COLOR_ID_PREFIX): + color_index = int(attribute_name[len(VERTEX_ATTRIBUTE_COLOR_ID_PREFIX):]) + assert(color_index < VERTEX_ATTRIBUTE_COLOR_MAX_COUNT) + + if color_index < len(blender_color_layers): + color_layer_name = blender_color_layers[color_index] + color_layer = blender_mesh.vertex_colors[color_layer_name] + + # No SRGB conversion necessary! + color_data = tuple(color_layer.data[loop_index].color) + + # todo-blender_io_ttyd: Think about whether + # this is the best way to handle color + # quantization. + + # Convert to 0-255 here instead of VCD table as + # the quantizations there are different in that + # they multiply by 2**n; for colors however, + # this does not hold, as 1.0 maps to 255 and + # not to 256. + color_data = tuple(int(x * 255) for x in color_data) + else: + # todo-blender_io_ttyd: Figure out if this is a + # probably should be. 
+ assert(False) + color_data = (255, 0, 255, 255) + + vertex_attributes[attribute_name] = color_data + vertices.append(vertex_attributes) + material_data[material_name]["polygons"].append(vertices) + + # Build final models + models = [] + for material_name in material_data: + model = DmdModel() + model.material_name = material_name + model.attributes = material_data[material_name]["attributes"] + model.polygons = material_data[material_name]["polygons"] + models.append(model) + return models + +class DmdJoint: + """DMD File Joint""" + + def __init__(self): + self.name = "" + + self.children = [] + self.models = [] + + self.translation = (0.0, 0.0, 0.0) + self.rotation = (0.0, 0.0, 0.0) + self.scale = (1.0, 1.0, 1.0) + + def link(self, linker, vcd_table, parent = None, next = None, prev = None): + blob_name = "joints:{}".format(self.name) + joint_data = bytearray(0x60 + len(self.models) * 0x8) + + linker.add_string(blob_name, 0x00, self.name) + linker.add_string(blob_name, 0x04, "mesh" if len(self.models) > 0 else "null") + + if parent: + linker.add_relocation(blob_name, 0x08, "joints:{}".format(parent.name)) + if len(self.children) > 0: + linker.add_relocation(blob_name, 0x0c, "joints:{}".format(self.children[0].name)) + if next: + linker.add_relocation(blob_name, 0x10, "joints:{}".format(next.name)) + if prev: + linker.add_relocation(blob_name, 0x14, "joints:{}".format(prev.name)) + + struct.pack_into( + ">fff", joint_data, 0x18, + self.scale[0], self.scale[1], self.scale[2] + ) + struct.pack_into( + ">fff", joint_data, 0x24, + self.rotation[0], self.rotation[1], self.rotation[2] + ) + struct.pack_into( + ">fff", joint_data, 0x30, + self.translation[0], self.translation[1], self.translation[2] + ) + + # Compute local-space bounding box + bbox = None + for model in self.models: + model_bbox = model.get_bbox() + if bbox == None: + bbox = (list(model_bbox[0]), list(model_bbox[1])) + continue + for i in range(3): + bbox[0][i] = min(bbox[0][i], model_bbox[0][i]) + bbox[1][i] = max(bbox[1][i], model_bbox[1][i]) + + # Default to non-zero bounding box imitating original exporter behavior + if bbox == None: + bbox = ((-0.1, -0.1, -0.1), (0.1, 0.1, 0.1)) + else: + bbox = (tuple(bbox[0]), tuple(bbox[1])) + + struct.pack_into( + ">ffffff", joint_data, 0x3c, + bbox[0][0], bbox[0][1], bbox[0][2], + bbox[1][0], bbox[1][1], bbox[1][2] + ) + struct.pack_into(">L", joint_data, 0x54, 0) # unknown + + # todo-blender_io_ttyd: Further investigate drawmode + if parent != None: # Root node does not have drawmode + drawmode_blob_name = "drawmodes:{}".format(self.name) + linker.add_relocation(blob_name, 0x58, drawmode_blob_name) + drawmode_data = bytearray(0x14) + struct.pack_into(">B", drawmode_data, 0x1, 1) # cullMode = back + linker.add_blob(drawmode_blob_name, drawmode_data) + linker.place_blob_in_section(drawmode_blob_name, "drawmodes") + + struct.pack_into(">L", joint_data, 0x5c, len(self.models)) + + for i in range(len(self.models)): + model = self.models[i] + mesh_blob_name = model.link(linker, vcd_table) + + # Only link material if there is a material, hit data may not have one + if model.material_name != "": + material_blob_name = "materials:{}".format(model.material_name) + linker.add_relocation(blob_name, 0x60 + i * 8, material_blob_name) + + linker.add_relocation(blob_name, 0x64 + i * 8, mesh_blob_name) + + linker.add_blob(blob_name, joint_data) + linker.place_blob_in_section(blob_name, "joints") + + for i in range(len(self.children)): + if i > 0: + prev_child = self.children[i - 1] + else: + 
prev_child = None + if i < len(self.children) - 1: + next_child = self.children[i + 1] + else: + next_child = None + self.children[i].link(linker, vcd_table, self, next_child, prev_child) + return blob_name + + @staticmethod + def from_blender_object(blender_object, materials, global_matrix = None): + joint = DmdJoint() + joint.name = blender_object.name + + joint.children = [] + for c in blender_object.children: + joint.children.append(DmdJoint.from_blender_object(c, materials)) + + transform_matrix = blender_object.matrix_local + if global_matrix != None: + transform_matrix = global_matrix @ transform_matrix + translation, rotation, scale = transform_matrix.decompose() + joint.translation = translation.to_tuple() + + rotation_euler = rotation.to_euler() + joint.rotation = ( + math.degrees(rotation_euler.x), + math.degrees(rotation_euler.y), + math.degrees(rotation_euler.z) + ) + + joint.scale = scale.to_tuple() + + if blender_object.type == "MESH": + blender_depsgraph = bpy.context.evaluated_depsgraph_get() + blender_evaluated_object = blender_object.evaluated_get(blender_depsgraph) + blender_mesh = blender_evaluated_object.to_mesh() + joint.models = DmdModel.list_from_blender_mesh(blender_mesh, materials) + blender_evaluated_object.to_mesh_clear() + + return joint + + @staticmethod + def from_blender_collection(collection, materials, global_matrix = None): + joint = DmdJoint() + joint.name = collection.name + + joint.children = [] + first_level_objects = [x for x in collection.all_objects if not x.parent] + for c in first_level_objects: + joint.children.append(DmdJoint.from_blender_object(c, materials, global_matrix)) + return joint + + +def blender_anim_data_from_dmd_object(target): + anim_data = None + if isinstance(target, DmdJoint): + anim_data = bpy.data.objects[target.name].animation_data + elif isinstance(target, DmdMaterial): + blender_material = bpy.data.materials[target.name] + if blender_material.node_tree != None: + anim_data = blender_material.node_tree.animation_data + else: + # todo-blender_io_ttyd: Add light animations + assert(False) + return anim_data + +def value_keyframe_convert_to_degrees(value_keyframe): + new_value_keyframe = list(value_keyframe) + for var_index in range(3): + new_value_keyframe[var_index] = math.degrees(value_keyframe[var_index]) + return tuple(new_value_keyframe) + + +class DmdAnimation: + def __init__(self): + self.name = "" + self.length = 0.0 + self.joint_transform_tracks = [] + self.material_uv_tracks = [] + self.material_blend_tracks = [] # TODO + self.light_transform_tracks = [] # TODO + self.light_parameter_tracks = [] # TODO + + def link(self, linker): + anim_blob_name = "animations:{}".format(self.name) + anim_data = bytearray(0x28) + + linker.add_string(anim_blob_name, 0x00, self.name) + + # Length in frames (float) + struct.pack_into(">f", anim_data, 0x08, self.length) + + # Helper to pack a keyframe tuple + def pack_keyframe_into(buffer, offset, data): + # Value, tangent in, tangent out, unk_0c, is_step + # unk_0c is zero in all animations in all TTYD maps, so we don't + # bother storing + struct.pack_into( + ">fffLL", buffer, offset, + data[0], data[1], data[2], 0, 1 if data[3] else 0 + ) + + # Tuples of (track list, offset to track table pointer in animation, + # header size, components) + track_type_descriptions = { + "joint_transform": ( + self.joint_transform_tracks, + 0x0c, # offset to track table ptr in animation + [ + ("translation", 3), + ("rotation", 3), + ("scale", 3), + (None, 3 * 4), # Unused fields + ], + 0x58 # header 
size + ), + "material_uv": ( + self.material_uv_tracks, + 0x10, + [ + ("translation", 2), + ("scale", 2), + ("rotation", 1), + ], + 0x10 + ), + "material_blend": ( + self.material_blend_tracks, + 0x14, + [ + ("color", 4), + ], + 0x4 + ), + "light_transform": ( + self.light_transform_tracks, + 0x18, + [ + ("translation", 3), + ("rotation", 3), + ("scale", 3), + ], + 0x4 + ), + "light_parameters": ( + self.light_parameter_tracks, + 0x1c, + [ + ("color", 3), + ("spot_angle", 1), + ("angular_attenuation", 3), + ], + 0x4 + ), + } + + all_track_type_blob_names = [] + for track_type_name, track_type_description in track_type_descriptions.items(): + track_type_blob_names = [] + + track_list = track_type_description[0] + animation_track_type_table_offset = track_type_description[1] + track_components = track_type_description[2] + track_header_size = track_type_description[3] + + if len(track_list) < 1: + continue + + track_total_component_count = sum([x[1] for x in track_components]) + # All keyframes are time as float header followed by some amount of + # component keyframes + track_keyframe_size = 0x4 + track_total_component_count * 0x14 + + for track in track_list: + track_blob_name = "animation_data:tracks:{}".format(linker.get_uid()) + track_data = bytearray(track_header_size + 0x4 + len(track["keyframes"]) * track_keyframe_size) + + if track_type_name == "joint_transform": + linker.add_string(track_blob_name, 0x00, track["joint_name"]) + + # Translation/rotation/scale origin + struct.pack_into( + ">fff", track_data, 0x04, + track["translation_origin"][0], + track["translation_origin"][1], + track["translation_origin"][2] + ) + struct.pack_into( + ">fff", track_data, 0x10, + track["rotation_origin"][0], + track["rotation_origin"][1], + track["rotation_origin"][2] + ) + struct.pack_into( + ">fff", track_data, 0x1c, + track["scale_origin"][0], + track["scale_origin"][1], + track["scale_origin"][2] + ) + + # Four unused sets, the first and third are sometimes set to + # 0.408818 on the Y-axis. Presumably these are Maya-specific + # additional transforms. We do not use this value here since it + # makes no actual difference and is not consistently used in the + # original exporter. 
+ struct.pack_into(">fff", track_data, 0x28, 0.0, 0.0, 0.0) + struct.pack_into(">fff", track_data, 0x34, 0.0, 0.0, 0.0) + struct.pack_into(">fff", track_data, 0x40, 0.0, 0.0, 0.0) + struct.pack_into(">fff", track_data, 0x4c, 0.0, 0.0, 0.0) + elif track_type_name == "material_uv": + linker.add_string(track_blob_name, 0x0, track["material_name"]) + struct.pack_into(">L", track_data, 0x4, track["sampler_index"]) + + # Scale-independent translation for alignment, not animatable + struct.pack_into(">ff", track_data, 0x8, track["align"][0], track["align"][1]) + elif track_type_name == "material_blend": + linker.add_string(track_blob_name, 0x0, track["material_name"]) + elif track_type_name == "light_transform": + linker.add_string(track_blob_name, 0x0, track["light_name"]) + elif track_type_name == "light_parameters": + linker.add_string(track_blob_name, 0x0, track["light_name"]) + + # Export the actual keyframes + struct.pack_into(">L", track_data, track_header_size + 0x0, len(track["keyframes"])) + + for keyframe_index, keyframe in enumerate(track["keyframes"]): + keyframe_offset = track_header_size + 0x4 + keyframe_index * track_keyframe_size + + struct.pack_into(">f", track_data, keyframe_offset, keyframe["time"]) + + component_offset = keyframe_offset + 0x4 + for value_index, value_info in enumerate(track_components): + value_key = value_info[0] + value_component_count = value_info[1] + for component_index in range(value_component_count): + if value_key != None: + if value_component_count == 1: + component_source_data = keyframe[value_key] + else: + component_source_data = keyframe[value_key][component_index] + else: + component_source_data = (0.0, 0.0, 0.0, 0) + pack_keyframe_into( + track_data, + component_offset, + component_source_data + ) + component_offset += 0x14 + assert(component_offset == keyframe_offset + track_keyframe_size) + + # We do not place the blob into a section here to emulate + # original exporter behavior. The original exporter serializes + # all data referenced by an animation into one contiguous chunk + # in a section that follows the serialized animations, with the + # track table located immediately *before* the tracks it + # contains. To do this, the track table must be placed in this + # shared section *after* the tracks themselves have been + # serialized. + linker.add_blob(track_blob_name, track_data) + track_type_blob_names.append(track_blob_name) + + # Serialize track table + track_table_blob_name = "animation_data:tables:{}".format(linker.get_uid()) + track_table_data = bytearray(0x4 + len(track_type_blob_names) * 4) + struct.pack_into(">L", track_table_data, 0x0, len(track_type_blob_names)) + for track_index, track_blob_name in enumerate(track_type_blob_names): + linker.add_relocation(track_table_blob_name, 0x4 + track_index * 4, track_blob_name) + linker.add_relocation( + anim_blob_name, + animation_track_type_table_offset, + track_table_blob_name + ) + linker.add_blob(track_table_blob_name, track_table_data) + linker.place_blob_in_section(track_table_blob_name, "animation_data") + + # Place tracks after their respective table emulating original exporter behavior. 
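# A small consistency sketch of the keyframe layout serialized above: every
# component keyframe is packed as ">fffLL" (value, tangent in, tangent out,
# unused word, is_step) = 0x14 bytes, and a full keyframe is a float time
# stamp followed by one such record per component of the track type.
import struct

COMPONENT_RECORD_SIZE = struct.calcsize(">fffLL")
assert COMPONENT_RECORD_SIZE == 0x14

def keyframe_size(component_counts):
    return 0x4 + sum(component_counts) * COMPONENT_RECORD_SIZE

assert keyframe_size([3, 3, 3, 3 * 4]) == 0x1a8  # joint_transform
assert keyframe_size([2, 2, 1]) == 0x68          # material_uv
assert keyframe_size([4]) == 0x54                # material_blend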
+ for track_blob_name in track_type_blob_names: + linker.place_blob_in_section(track_blob_name, "animation_data") + + linker.add_blob(anim_blob_name, anim_data) + linker.place_blob_in_section(anim_blob_name, "animations") + return anim_blob_name + + @staticmethod + def list_from_blender_nla_tracks_for_targets(targets, global_matrix = None): + animation_sources = defaultdict(set) + for target in targets: + anim_data = blender_anim_data_from_dmd_object(target) + if anim_data == None: + continue + + for track in anim_data.nla_tracks: + animation_sources[track.name].add(target) + + animations = [] + for name, targets in animation_sources.items(): + animation = DmdAnimation.from_blender_name_and_targets( + name, + targets, + global_matrix + ) + animations.append(animation) + return animations + + @staticmethod + def from_blender_name_and_targets(name, targets, global_matrix = None): + anim = DmdAnimation() + anim.name = name + + def any_keyframes_exist(target, action, blender_fcurve_mapping): + # Check if there are any FCurves in this action that matter for + # this track type + for fcurve in action.fcurves: + blender_data_source = (fcurve.data_path, fcurve.array_index) + if blender_data_source in blender_fcurve_mapping: + return True + return False + + def make_keyframes(target, action, keyframe_layout, blender_fcurve_mapping): + # There may not be keyframes for all attributes in Blender, however + # the map animations must always have them, so for convenience we + # insert best-effort keyframes. + keyframe_times = set() + for fcurve in action.fcurves: + for k in fcurve.keyframe_points: + keyframe_times.add(k.co[0]) + sorted_keyframe_times = sorted(keyframe_times) + + keyframes = [] + for time in sorted_keyframe_times: + # Init keyframe data + keyframe = {} + keyframe["time"] = time + + for name, count in keyframe_layout.items(): + if count == 1: + keyframe[name] = None + else: + keyframe[name] = [None] * count + + # Try and find corresponding keyframe for each fcurve + for fcurve in action.fcurves: + # If we don't use this FCurve, don't bother to convert it + blender_data_source = (fcurve.data_path, fcurve.array_index) + if blender_data_source not in blender_fcurve_mapping: + continue + + found_keyframe = False + for k in fcurve.keyframe_points: + if k.co[0] == time: + found_keyframe = True + + value = k.co[1] + tangent_in = (k.co[1] - k.handle_left[1]) / (k.co[0] - k.handle_left[0]) + tangent_out = (k.handle_right[1] - k.co[1]) / (k.handle_left[0] - k.co[0]) + is_step = False + break + + # todo-blender_io_ttyd: Handle missing keyframes with + # better approximation. + # If there is no keyframe, approximate badly based on curve + # data + if not found_keyframe: + delta_time_step = 1 + value = fcurve.evaluate(time) + prev_value = fcurve.evaluate(time - delta_time_step) / delta_time_step + next_value = fcurve.evaluate(time + delta_time_step) / delta_time_step + tangent_in = value - prev_value + tangent_out = next_value - value + is_step = False + + keyframe_component_data = (value, tangent_in, tangent_out, is_step) + target_key, target_index = blender_fcurve_mapping[blender_data_source] + if target_index != None: + keyframe[target_key][target_index] = keyframe_component_data + else: + keyframe[target_key] = keyframe_component_data + + keyframes.append(keyframe) + + # Fill any keyframes that were not accounted for by any FCurve with + # whatever the current value is. 
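# A minimal sketch of the Blender animation data consumed above, runnable in
# Blender's Python console: each FCurve is identified by (data_path,
# array_index), and its keyframe_points carry co = (frame, value) plus the
# Bezier handle positions that the tangents are derived from. The object name
# "some_joint" is a placeholder.
import bpy

action = bpy.data.objects["some_joint"].animation_data.nla_tracks[0].strips[0].action
for fcurve in action.fcurves:
    print(fcurve.data_path, fcurve.array_index)
    for key in fcurve.keyframe_points:
        print("  frame", key.co[0], "value", key.co[1],
              "handles", tuple(key.handle_left), tuple(key.handle_right))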
+ blender_fcurve_reverse_mapping = { + v: k for k, v in blender_fcurve_mapping.items() + } + for keyframe in keyframes: + for value_key in keyframe_layout: + component_count = keyframe_layout[value_key] + for component_index in range(component_count): + if component_count == 1: + if keyframe[value_key] != None: + continue + else: + if keyframe[value_key][component_index] != None: + continue + + output_path = (value_key, component_index) + assert(output_path in blender_fcurve_reverse_mapping) + blender_source_path, blender_source_index = blender_fcurve_reverse_mapping[output_path] + + # Use Blender RNA magic to get the right property. + # todo-blender_io_ttyd: This shares code with the + # anim_data lookup. + blender_source_object = None + if isinstance(target, DmdJoint): + blender_source_object = bpy.data.objects[target.name] + elif isinstance(target, DmdMaterial): + blender_source_object = bpy.data.materials[target.name].node_tree + else: + assert(False) + + blender_source_property = blender_source_object.path_resolve(blender_source_path) + if blender_source_index != None: + blender_source_data = blender_source_property[blender_source_index] + else: + blender_source_data = blender_source_property + + # Constant value keyframe with zero slope + value_keyframe = ( + blender_source_data, + 0, + 0, + False + ) + if component_count == 1: + keyframe[value_key] = value_keyframe + else: + keyframe[value_key][component_index] = value_keyframe + + return keyframes + + def try_make_joint_transform_track(target, action, global_matrix = None): + blender_fcurve_mapping = { + ("location", 0): ("translation", 0), + ("location", 1): ("translation", 1), + ("location", 2): ("translation", 2), + ("rotation_euler", 0): ("rotation", 0), + ("rotation_euler", 1): ("rotation", 1), + ("rotation_euler", 2): ("rotation", 2), + ("scale", 0): ("scale", 0), + ("scale", 1): ("scale", 1), + ("scale", 2): ("scale", 2), + } + + if not any_keyframes_exist(target, action, blender_fcurve_mapping): + return None + + track = {} + track["joint_name"] = target.name + track["keyframes"] = make_keyframes( + target, + action, + { + "translation": 3, + "rotation": 3, + "scale": 3, + }, + blender_fcurve_mapping + ) + + # todo-blender_io_ttyd: Check that this is works correctly in all + # cases and no additional coordinate transform is required. 
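# A minimal sketch of the RNA lookup used in the keyframe fallback above,
# runnable inside Blender: bpy_struct.path_resolve() turns an FCurve-style
# data path into the live property value, which is then indexed by the
# FCurve's array_index. The object, material and node names are placeholders,
# and the ".translation" path assumes the Blender 2.80-style mapping node, as
# the fcurve paths used in this file do.
import bpy

joint_object = bpy.data.objects["some_joint"]
z_location = joint_object.path_resolve("location")[2]        # ("location", 2)

node_tree = bpy.data.materials["some_material"].node_tree
u_offset = node_tree.path_resolve('nodes["Mapping"].translation')[0]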
+ track["translation_origin"] = target.translation + track["rotation_origin"] = target.rotation + track["scale_origin"] = target.scale + + # Convert rotation to degrees + for keyframe in track["keyframes"]: + for component_index in range(len(keyframe["rotation"])): + keyframe["rotation"][component_index] = value_keyframe_convert_to_degrees( + keyframe["rotation"][component_index] + ) + + return track + + def try_make_material_uv_track(target, sampler_index, action): + # Check if this sampler is actually animated + mapping_node_name = target.samplers[sampler_index]["mapping_node"] + + fcurve_name_fmt = "nodes[\"" + mapping_node_name + "\"].{}" + translation_fcurve_name = fcurve_name_fmt.format("translation") + scale_fcurve_name = fcurve_name_fmt.format("scale") + rotation_fcurve_name = fcurve_name_fmt.format("rotation") + + blender_fcurve_mapping = { + (translation_fcurve_name, 0): ("translation", 0), + (translation_fcurve_name, 1): ("translation", 1), + (scale_fcurve_name, 0): ("scale", 0), + (scale_fcurve_name, 1): ("scale", 1), + (rotation_fcurve_name, 2): ("rotation", None), + } + if not any_keyframes_exist(target, action, blender_fcurve_mapping): + return None + + track = {} + track["material_name"] = target.name + track["sampler_index"] = sampler_index + + # This effectively defines the rotation pivot point; (1.0, 1.0) is + # centered. Blender always has this at (0.0, 0.0), which + # corresponds to (0.0, 2.0) in the coordinate system used for this + # field. + track["align"] = (0.0, 2.0) + + track["keyframes"] = make_keyframes( + target, + action, + { + "translation": 2, + "scale": 2, + "rotation": 1, + }, + blender_fcurve_mapping + ) + + # Convert rotation to degrees and invert + for keyframe in track["keyframes"]: + value_keyframe = value_keyframe_convert_to_degrees(keyframe["rotation"]) + keyframe["rotation"] = tuple([-x for x in value_keyframe[:3]] + [value_keyframe[3]]) + + return track + + # Find all NLA strips for each target, each of which is converted into + # one subanimation. + length = 0.0 + for target in targets: + anim_data = blender_anim_data_from_dmd_object(target) + if anim_data == None: + continue + + for nla_track in anim_data.nla_tracks: + # Only get relevant tracks for this animation + # Can't process tracks + if nla_track.name != name or len(nla_track.strips) != 1: + continue + strip = nla_track.strips[0] + length = max(length, strip.frame_end) + action = strip.action + + # Try to generate all types of tracks + if isinstance(target, DmdJoint): + track = try_make_joint_transform_track(target, action, global_matrix) + if track != None: + anim.joint_transform_tracks.append(track) + elif isinstance(target, DmdMaterial): + for sampler_index in range(len(target.samplers)): + track = try_make_material_uv_track(target, sampler_index, action) + if track != None: + anim.material_uv_tracks.append(track) + else: + assert(False) + anim.length = length + + # Notably we do not check for animations without tracks here. They do + # actually occur in TTYD and may be controlled through the animation + # system in order to then query the progress to drive some other state. 
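# A small sketch of why the whole keyframe tuple goes through
# value_keyframe_convert_to_degrees(): tangents are slopes in value units per
# frame, so converting radians to degrees rescales them by the same 180/pi
# factor as the value itself, while the is_step flag is left untouched.
import math

radian_keyframe = (math.pi, 0.5, 0.5, False)  # (value, tangent in, tangent out, is_step)
degree_keyframe = value_keyframe_convert_to_degrees(radian_keyframe)
# degree_keyframe is approximately (180.0, 28.648, 28.648, False)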
+ return anim + +class DmdFile: + def __init__(self): + self.root_joint = None + + self.map_joint = None + self.hit_joint = None + + self.materials = [] + self.textures = [] + self.animations = [] + + @staticmethod + def from_blender_scene(scene, settings): + file = DmdFile() + file.root_joint = DmdJoint() + file.root_joint.name = "world_root" + + global_matrix = mathutils.Matrix.Identity(4) + if "axis_conversion_matrix" in settings: + global_matrix = settings["axis_conversion_matrix"] + file.materials = [] + file.map_joint = DmdJoint.from_blender_collection( + settings["map_root"], + file.materials, + global_matrix + ) + file.hit_joint = DmdJoint.from_blender_collection( + settings["hit_root"], + None, + global_matrix + ) + + file.root_joint.children = [ + file.map_joint, + file.hit_joint + ] + + # Identify textures and sort into correct order + + # todo-blender_io_ttyd: This style of gathering textures is + # inconsistent with the style for gathering materials used a few lines + # above. They should probably use the same approach, however properties + # of the data are required for materials as the referenced vertex + # attributes are necessary for vertex attribute data serialization. + # Think about this. + + texture_order = [] + for material in file.materials: + samplers = material.samplers + for sampler in samplers: + texture_name = sampler["texture_name"] + if texture_name != "" and texture_name not in texture_order: + texture_order.append(texture_name) + + # Generate textures + file.textures = [] + for texture_name in texture_order: + blender_image = bpy.data.images[texture_name] + texture = DmdTexture.from_blender_image(blender_image) + file.textures.append(texture) + + # Identify animations + def get_all_children_nodes(joint): + # Post-order DFS + joint_list = [] + joint_stack = [(joint, 0)] + + while True: + if len(joint_stack) == 0: + break + + edge_node, next_child_index = joint_stack[-1] + + # Ascend + if next_child_index == len(edge_node.children): + del joint_stack[-1] + continue + + # Descend + next_child = edge_node.children[next_child_index] + joint_list.append(next_child) + joint_stack[-1] = (edge_node, next_child_index + 1) + joint_stack.append( + (next_child, 0) + ) + + return joint_list + + potential_animation_targets = [] + potential_animation_targets += get_all_children_nodes(file.map_joint) + potential_animation_targets += get_all_children_nodes(file.hit_joint) + potential_animation_targets += file.materials + # todo-blender_io_ttyd: Light animations + + file.animations = DmdAnimation.list_from_blender_nla_tracks_for_targets( + potential_animation_targets, + global_matrix + ) + + return file + + def serialize(self): + linker = DmdLinker() + + vcd_table = DmdVcdTable() + root_joint_blob_name = self.root_joint.link(linker, vcd_table) + vcd_table.link(linker) + + # Helper function to write a table containing references to other elements + def link_reference_table(linker, blob_name, section_name, references): + table_data = bytearray(0x4 + len(references) * 0x4) + struct.pack_into(">L", table_data, 0x0, len(references)) # Element count + for i, reference in enumerate(references): + linker.add_relocation(blob_name, 0x4 + i * 0x4, reference) + linker.add_blob(blob_name, table_data) + linker.place_blob_in_section(blob_name, section_name) + + # Animation table + animation_blob_names = [a.link(linker) for a in self.animations] + link_reference_table(linker, "animation_table", "animation_table", animation_blob_names) + + # Curve table (legacy, always empty) + 
link_reference_table(linker, "curve_table", "curve_table", []) + + # Fog table + # todo-blender_io_ttyd: Expose fog settings to user + fog_table_blob_name = "fog_table" + fog_table_data = bytearray(0x14) + struct.pack_into(">L", fog_table_data, 0x00, 0) # Fog enabled + struct.pack_into(">L", fog_table_data, 0x04, 0) # Fog mode + struct.pack_into(">f", fog_table_data, 0x08, 0) # Fog start + struct.pack_into(">f", fog_table_data, 0x0c, 1000) # Fog end + struct.pack_into(">L", fog_table_data, 0x10, 0x000000FF) # Fog color + linker.add_blob(fog_table_blob_name, fog_table_data) + linker.place_blob_in_section(fog_table_blob_name, "fog_table") + + # Light table + # todo-blender_io_ttyd: Light support + link_reference_table(linker, "light_table", "light_table", []) + + # Material name table + material_name_table_blob_name = "material_name_table" + material_name_table_data = bytearray(4 + len(self.materials) * 8) + struct.pack_into(">L", material_name_table_data, 0x0, len(self.materials)) + for i, material in enumerate(self.materials): + linker.add_string( + material_name_table_blob_name, + 0x4 + i * 8, + material.name + ) + material_blob_name = material.link(linker) + linker.add_relocation( + material_name_table_blob_name, + 0x8 + i * 8, + material_blob_name + ) + linker.add_blob(material_name_table_blob_name, material_name_table_data) + linker.place_blob_in_section(material_name_table_blob_name, "material_name_table") + + # Texture table + texture_table_blob_name = "texture_table" + texture_table_data = bytearray(0x4 + len(self.textures) * 0x4) + struct.pack_into(">L", texture_table_data, 0x0, len(self.textures)) # Texture count + for i, texture in enumerate(self.textures): + linker.add_string(texture_table_blob_name, 0x4 + i * 0x4, texture.name) + linker.add_blob(texture_table_blob_name, texture_table_data) + linker.place_blob_in_section(texture_table_blob_name, "texture_table") + + # Serialize referenced textures + for texture in self.textures: + texture.link(linker) + + # Information table + information_table_blob_name = "information" + information_table_data = bytearray(0x14) + + linker.add_string(information_table_blob_name, 0x00, "ver1.02") # Version string + linker.add_relocation(information_table_blob_name, 0x04, root_joint_blob_name) # World root + linker.add_string(information_table_blob_name, 0x08, self.map_joint.name) + linker.add_string(information_table_blob_name, 0x0c, self.hit_joint.name) + date_text = datetime.datetime.utcnow().strftime("%y/%m/%d %H:%M:%S") + linker.add_string(information_table_blob_name, 0x10, date_text) + + linker.add_blob(information_table_blob_name, information_table_data) + linker.place_blob_in_section(information_table_blob_name, "information") + + # Place sections and finalize linked data. 
+ linker.place_section("information") + linker.place_section("texture_data") + linker.place_section("sampler_data") + linker.place_section("vertex_attribute_data", 32) + linker.place_section("materials") + linker.place_section("meshs") + linker.place_section("joints") + linker.place_section("vcd_table") + linker.place_section("material_name_table") + linker.place_section("light_table") + linker.place_section("fog_table") + linker.place_section("texture_table") + linker.place_section("curve_table") + linker.place_section("animation_table") + linker.place_section("animations") + linker.place_section("animation_data") + linker.place_section("drawmodes") + linker.place_section("tev_configs") + linker.place_section("strings") + + # Generate final data + assert(linker.resolve_relocations()) + linked_data = linker.serialize() + print(linker.dump_map()) + + # Pad out to multiple of 32 bytes + linked_data += (align_up(len(linked_data), 32) - len(linked_data)) * b"\x00" + + # Build table infos + # These appear alphabetically sorted, presumably this was done + # dynamically by the original exporter. This data is not created using + # the offset table, but instead hardcoded offsets into the file + table_order = [ + "animation_table", + "curve_table", + "fog_table", + "information", + "light_table", + "material_name_table", + "texture_table", + "vcd_table" + ] + + table_info_data = bytearray(len(table_order) * 8) + table_name_data = bytearray() + for i, table_name in enumerate(table_order): + struct.pack_into( + ">L", + table_info_data, + 0x0 + i * 8, + linker.get_blob_address(table_name) + ) + struct.pack_into(">L", table_info_data, 0x4 + i * 8, len(table_name_data)) # Name offset + encoded_table_name = table_name.encode("shift_jis") + b"\x00" + table_name_data += encoded_table_name + + # Build offset table + offsets = [] + for source_name, source_offset, target_name in linker.resolved_relocations: + offset = linker.get_blob_address(source_name) + source_offset + offsets.append(offset) + + # Offsets appears sorted in ascending order in table + offsets.sort() + + # Build final data + offset_table_data = bytearray(len(offsets) * 4) + for i, offset in enumerate(offsets): + struct.pack_into(">L", offset_table_data, i * 4, offset) + + # Build file header + header_data = bytearray(0x20) + struct.pack_into(">L", header_data, 0x4, len(linked_data)) + struct.pack_into(">L", header_data, 0x8, len(offsets)) + struct.pack_into(">L", header_data, 0xc, len(table_order)) + + # Assemble final file + # This order is important as only the location of the offset table is + # encoded in the header and the game assumes the table infos follow. + final_data = bytearray() + final_data += header_data + final_data += linked_data + final_data += offset_table_data + final_data += table_info_data + final_data += table_name_data + + # Add final file size + struct.pack_into(">L", final_data, 0x0, len(final_data)) + + return final_data \ No newline at end of file diff --git a/ttyd-tools/blender_io_ttyd/io_scene_ttyd/export_ttyd.py b/ttyd-tools/blender_io_ttyd/io_scene_ttyd/export_ttyd.py new file mode 100644 index 00000000..4c2e2582 --- /dev/null +++ b/ttyd-tools/blender_io_ttyd/io_scene_ttyd/export_ttyd.py @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: GPL-3.0-or-later +# Copyright 2019 Linus S. (aka PistonMiner) + +import os + +import bpy + +from . import dmd +from . import tpl +from . 
import camera_road + +def export(context, settings): + # Generate main DMD + dmd_file = dmd.DmdFile.from_blender_scene( + context.scene, + settings + ) + dmd_data = dmd_file.serialize() + dmd_path = os.path.join(settings["root_path"], "d") + with open(dmd_path, "wb") as f: + f.write(dmd_data) + #print("Wrote DMD to {}".format(dmd_path)) + + # Generate TPL + tpl_file = tpl.TplFile() + for dmd_texture in dmd_file.textures: + blender_image = bpy.data.images[dmd_texture.name] + tpl_texture = tpl.TplTexture.from_blender_image(blender_image) + tpl_file.textures.append(tpl_texture) + tpl_data = tpl_file.serialize() + tpl_path = os.path.join(settings["root_path"], "t") + with open(tpl_path, "wb") as f: + f.write(tpl_data) + + # Generate camera road + camera_road_file = camera_road.CameraRoadFile.from_blender_scene( + context.scene, + settings + ) + camera_road_data = camera_road_file.serialize() + camera_road_path = os.path.join(settings["root_path"], "c") + with open(camera_road_path, "wb") as f: + f.write(camera_road_data) + + return {'FINISHED'} \ No newline at end of file diff --git a/ttyd-tools/blender_io_ttyd/io_scene_ttyd/tpl.py b/ttyd-tools/blender_io_ttyd/io_scene_ttyd/tpl.py new file mode 100644 index 00000000..16eee90b --- /dev/null +++ b/ttyd-tools/blender_io_ttyd/io_scene_ttyd/tpl.py @@ -0,0 +1,492 @@ +# SPDX-License-Identifier: GPL-3.0-or-later +# Copyright 2019 Linus S. (aka PistonMiner) + +import math + +from .util import * + +TPL_FORMAT_IDS = { + "I4": 0, + "I8": 1, + "IA4": 2, + "IA8": 3, + "RGB565": 4, + "RGB5A3": 5, + "RGBA32": 6, + "C4": 8, + "C8": 9, + "C14X2": 10, + "CMPR": 14, +} + +TPL_FORMAT_BLOCK_SIZES = { + "I4": (8, 8), + "I8": (8, 4), + "IA4": (8, 4), + "IA8": (4, 4), + "RGB565": (4, 4), + "RGB5A3": (4, 4), + "RGBA32": (4, 4), + "C4": (8, 8), + "C8": (8, 4), + "C14X2": (4, 4), + "CMPR": (8, 8), +} + +TPL_WRAP_MODES = { + "CLAMP": 0, + "REPEAT": 1, + "MIRROR": 2, +} + +def get_block(pixels, size, block_size, block_x, block_y): + # Find block bounds backed with image data + image_block_start_x = block_x * block_size[0] + image_block_start_y = block_y * block_size[1] + image_block_end_x = min(size[0], image_block_start_x + block_size[0]) + image_block_end_y = min(size[1], image_block_start_y + block_size[1]) + block_backed_size_x = image_block_end_x - image_block_start_x + block_backed_size_y = image_block_end_y - image_block_start_y + #print("TPL: Block {}/{} - actual size {}/{}".format(block_x, block_y, block_backed_size_x, block_backed_size_y)) + + # Extract rows + buffer_block_start = image_block_start_y * size[0] + image_block_start_x + block = [] + for block_row_index in range(block_size[1]): + if block_row_index >= block_backed_size_y: + block.extend([None] * block_size[0]) + continue + + buffer_row_start = buffer_block_start + block_row_index * size[0] + buffer_row_end = buffer_row_start + block_backed_size_x + + block.extend(pixels[buffer_row_start:buffer_row_end]) + if block_backed_size_x < block_size[0]: + block.extend([None] * (block_size[0] - block_backed_size_x)) + + return block + +def float_to_quantized(value, bits): + # Very primitive conversion + maximum_value = 2 ** bits - 1 + quantized = round(value * maximum_value) + assert(quantized <= 2 ** bits - 1) + return quantized + +def downsample_pixels(pixels, size): + # Box-filter four pixels into one. 
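# A quick sketch of the block math the encoder below relies on, using the
# TPL_FORMAT_BLOCK_SIZES table defined above: images are carved into
# format-specific blocks, edge blocks are padded with None pixels (which
# encode as zero), and e.g. RGB5A3 stores 2 bytes per texel. The image size
# is a made-up example.
import math

image_size = (200, 37)
block_w, block_h = TPL_FORMAT_BLOCK_SIZES["RGB5A3"]      # (4, 4)
blocks_x = math.ceil(image_size[0] / block_w)            # 50
blocks_y = math.ceil(image_size[1] / block_h)            # 10, last row padded
encoded_size = blocks_x * blocks_y * block_w * block_h * 2
assert encoded_size == 16000                             # bytes of RGB5A3 data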
+ block_size = (2, 2) + block_count_x = math.ceil(size[0] / block_size[0]) + block_count_y = math.ceil(size[1] / block_size[1]) + + downsampled_pixels = [] + for block_y in range(block_count_y): + for block_x in range(block_count_x): + block = get_block(pixels, size, block_size, block_x, block_y) + + real_pixels = [p for p in block if p != None] + filtered_pixel = [0.0, 0.0, 0.0, 0.0] + for pixel in real_pixels: + filtered_pixel = [pixel[i] / len(real_pixels) + filtered_pixel[i] for i in range(4)] + downsampled_pixels.append(tuple(filtered_pixel)) + + return downsampled_pixels + +def encode_pixels(pixels, size, image_format): + texture_data = bytearray() + + # We process one block at a time for memory reasons. + block_size = TPL_FORMAT_BLOCK_SIZES[image_format] + block_count_x = math.ceil(size[0] / block_size[0]) + block_count_y = math.ceil(size[1] / block_size[1]) + #print("TPL: Block count {}/{}".format(block_count_x, block_count_y)) + for block_y in range(block_count_y): + for block_x in range(block_count_x): + block = get_block(pixels, size, block_size, block_x, block_y) + + if image_format == "I4": + block_data = bytearray(len(block) / 2) + for i in range(len(block) / 2): + high_pixel = block[i * 2 + 0] + high_data = float_to_quantized(high_pixel[0], 4) if high_pixel else 0 + low_pixel = block[i * 2 + 1] + low_data = float_to_quantized(low_pixel[0], 4) if low_pixel else 0 + struct.pack_into(">B", block_data, i * 2, high_data << 4 | low_data) + elif image_format == "I8": + block_data = bytearray(len(block)) + for i, pixel in enumerate(block): + pixel_data = float_to_quantized(pixel[0], 8) if pixel else 0 + struct.pack_into(">B", block_data, i, pixel_data) + elif image_format == "IA4": + block_data = bytearray(len(block)) + for i, pixel in enumerate(block): + if pixel: + intensity_data = float_to_quantized(pixel[0], 4) + alpha_data = float_to_quantized(pixel[3], 4) + else: + intensity_data = 0 + alpha_data = 0 + struct.pack_into(">B", block_data, i, intensity_data << 4 | alpha_data) + elif image_format == "IA8": + block_data = bytearray(len(block) * 2) + for i, pixel in enumerate(block): + if pixel: + intensity_data = float_to_quantized(pixel[0], 8) + alpha_data = float_to_quantized(pixel[3], 8) + else: + intensity_data = 0 + alpha_data = 0 + struct.pack_into(">BB", block_data, i * 2, intensity_data, alpha_data) + elif image_format == "RGB565": + block_data = bytearray(len(block) * 2) + for i, pixel in enumerate(block): + if pixel: + red_data = float_to_quantized(pixel[0], 5) + green_data = float_to_quantized(pixel[1], 6) + blue_data = float_to_quantized(pixel[2], 5) + else: + red_data = 0 + green_data = 0 + blue_data = 0 + final_data = red_data << 11 | green_data << 5 | blue_data + struct.pack_into(">H", block_data, i * 2, final_data) + elif image_format == "RGB5A3": + block_data = bytearray(len(block) * 2) + for i, pixel in enumerate(block): + if pixel: + has_alpha = pixel[3] < (224 / 255) + if has_alpha: + alpha_data = float_to_quantized(pixel[3], 3) + red_data = float_to_quantized(pixel[0], 4) + green_data = float_to_quantized(pixel[1], 4) + blue_data = float_to_quantized(pixel[2], 4) + + packed_data = 0 + packed_data |= alpha_data << 12 + packed_data |= red_data << 8 + packed_data |= green_data << 4 + packed_data |= blue_data + else: + red_data = float_to_quantized(pixel[0], 5) + green_data = float_to_quantized(pixel[1], 5) + blue_data = float_to_quantized(pixel[2], 5) + + packed_data = 1 << 15 + packed_data |= red_data << 10 + packed_data |= green_data << 5 + packed_data |= 
blue_data + else: + packed_data = 0 + struct.pack_into(">H", block_data, i * 2, packed_data) + elif image_format == "RGBA32": + low_block_data = bytearray(len(block) * 2) + high_block_data = bytearray(len(block) * 2) + for i, pixel in enumerate(block): + if pixel: + #print(pixel) + red_data = float_to_quantized(pixel[0], 8) + green_data = float_to_quantized(pixel[1], 8) + blue_data = float_to_quantized(pixel[2], 8) + alpha_data = float_to_quantized(pixel[3], 8) + + packed_low_data = alpha_data << 8 | red_data + packed_high_data = green_data << 8 | blue_data + else: + packed_low_data = 0 + packed_high_data = 0 + + struct.pack_into(">H", low_block_data, i * 2, packed_low_data) + struct.pack_into(">H", high_block_data, i * 2, packed_high_data) + block_data = low_block_data + high_block_data + elif image_format == "C4": + # todo-blender_io_ttyd: Add support for TPL palette image formats + assert(False) + elif image_format == "C8": + assert(False) + elif image_format == "C14X2": + assert(False) + elif image_format == "CMPR": + # Very primitive DXT1 compressor. Should be replaced. + # Try all colors in the blocks as endpoints and fit the + # best ones. Has various flaws like checking color similarity + # in float space and not in RGB565 space. Not optimized, can + # check same color combination twice. + # todo-blender_io_ttyd: Replace crude DXT1 compressor with + # Simon Brown's cluster fit technique. + def dxt1_compress_block(block): + has_alpha = False + for p in block: + if p == None: + continue + + if p[3] < 0.5: + has_alpha = True + break + + # Find colors relevant for palette creation + opaque_pixels = [pixel for pixel in block if pixel != None] + if has_alpha: + opaque_pixels = [pixel for pixel in opaque_pixels if pixel[3] >= 0.5] + + #print("TPL - CMPR: Opaque pixels {}".format(opaque_pixels)) + + def color_to_rgb565(color): + red_data = float_to_quantized(color[0], 5) + green_data = float_to_quantized(color[1], 6) + blue_data = float_to_quantized(color[2], 5) + packed_data = red_data << 11 | green_data << 5 | blue_data + return packed_data + + def lerp_colors(lhs, rhs, factor): + out_color = [lhs[i] * (1 - factor) + rhs[i] * factor for i in range(3)] + return tuple(out_color) + + def color_distance(lhs, rhs): + distance = sum([(lhs[i] - rhs[i]) ** 2 for i in range(3)]) + #print("Calculated distance: {}".format(distance)) + return distance + + if len(opaque_pixels) == 0: + # No opaque pixels, make everything transparent + assert(has_alpha) + best_block_indices = [3] * 16 + best_block_first_ep_packed = 0 + best_block_second_ep_packed = 0 + else: + best_block_distance = None + best_block_first_ep_packed = None + best_block_second_ep_packed = None + best_block_indices = None + for first_ep_index in range(len(opaque_pixels)): + for second_ep_index in range(len(opaque_pixels)): + first_ep_color = opaque_pixels[first_ep_index][:3] + second_ep_color = opaque_pixels[second_ep_index][:3] + #print("TPL - CMPR: Trying {}/{}".format(first_ep_color, second_ep_color)) + + first_ep_packed = color_to_rgb565(first_ep_color) + second_ep_packed = color_to_rgb565(second_ep_color) + #print("TPL - CMPR: Packed endpoints: {:x}/{:x}".format(first_ep_packed, second_ep_packed)) + + # Force colors in right order if using alpha + if has_alpha and not (first_ep_packed <= second_ep_packed): + first_ep_color, second_ep_color = second_ep_color, first_ep_color + first_ep_packed, second_ep_packed = second_ep_packed, first_ep_packed + + # Construct palette + if first_ep_packed <= second_ep_packed: + palette_colors = [ + 
first_ep_color, + second_ep_color, + lerp_colors(first_ep_color, second_ep_color, 1/2) + ] + else: + palette_colors = [ + first_ep_color, + second_ep_color, + lerp_colors(first_ep_color, second_ep_color, 1/3), + lerp_colors(first_ep_color, second_ep_color, 2/3) + ] + + # Fit colors to palette + block_indices = [] + for pixel in block: + # Pixels outside the image + # Color we pick doesn't matter so we'll go with + # the potentially transparent one + if pixel == None: + block_indices.append(3) + continue + + # Transparent pixels + if has_alpha and pixel[3] < 0.5: + block_indices.append(3) + continue + + best_pixel_distance = None + best_pixel_index = None + for palette_index, palette_color in enumerate(palette_colors): + distance = color_distance(pixel, palette_color) + if (best_pixel_distance == None + or distance < best_pixel_distance): + #print("New best index: {}".format(palette_index)) + best_pixel_distance = distance + best_pixel_index = palette_index + + assert(best_pixel_distance != None) + block_indices.append(best_pixel_index) + + #print("TPL - CMPR: Candidate {}".format(block_indices)) + + # Calculate block score + block_distance = 0.0 + for i in range(len(block)): + original_color = block[i] + if original_color == None: + continue + + palette_index = block_indices[i] + + # Skip alpha entries + if has_alpha and palette_index == 3: + continue + + palette_color = palette_colors[palette_index] + block_distance += color_distance(original_color, palette_color) + + if (best_block_distance == None + or block_distance < best_block_distance): + best_block_distance = block_distance + best_block_first_ep_packed = first_ep_packed + best_block_second_ep_packed = second_ep_packed + best_block_indices = block_indices + + # Pack final block + #print("TPL - CMPR: Best indices: {}".format(best_block_indices)) + block_data = bytearray(8) + struct.pack_into(">HH", block_data, 0x0, best_block_first_ep_packed, best_block_second_ep_packed) + packed_indices = 0 + for i, palette_index in enumerate(best_block_indices): + packed_indices |= (palette_index << ((15 - i) * 2)) + struct.pack_into(">L", block_data, 0x4, packed_indices) + + return block_data + + # Actual compression + #print("TPL - CMPR: Block {}".format(block)) + block_data = bytearray() + for subblock_y in range(2): + for subblock_x in range(2): + subblock = get_block(block, block_size, (4, 4), subblock_x, subblock_y) + subblock_data = dxt1_compress_block(subblock) + block_data += subblock_data + else: + assert(False) + + # Write back block + texture_data += block_data + + return texture_data + +class TplTexture: + def __init__(self): + self.size = (0, 0) + self.wrap = 1 # repeat + self.texture_data = bytearray() + self.format = "" + + def link(self, linker): + texture_id = linker.get_uid() + texture_info_blob_name = "texture_infos:" + str(texture_id) + texture_info_data = bytearray(0x8) + + texture_header_blob_name = "texture_headers:" + str(texture_id) + texture_header_data = bytearray(0x24) + + struct.pack_into(">HH", texture_header_data, 0x00, self.size[0], self.size[1]) + struct.pack_into(">L", texture_header_data, 0x04, TPL_FORMAT_IDS[self.format]) + + wrap_mode = TPL_WRAP_MODES[self.wrap] + struct.pack_into(">L", texture_header_data, 0x0c, wrap_mode) # wrap S + struct.pack_into(">L", texture_header_data, 0x10, wrap_mode) # wrap T + + # Min/Mag filter set to linear, unused for map materials + struct.pack_into(">L", texture_header_data, 0x14, 1) # min filter + struct.pack_into(">L", texture_header_data, 0x18, 1) # mag filter + + # 
Serialize image data + texture_data_blob_name = "texture_data:" + str(texture_id) + + texture_data = self.texture_data.copy() + texture_data.extend(b"\x00" * align_up(len(texture_data), 32)) + + linker.add_blob(texture_data_blob_name, texture_data) + linker.place_blob_in_section(texture_data_blob_name, "texture_data") + + linker.add_relocation( + texture_header_blob_name, + 0x08, + texture_data_blob_name + ) + + linker.add_relocation(texture_info_blob_name, 0x0, texture_header_blob_name) + + linker.add_blob(texture_header_blob_name, texture_header_data) + linker.place_blob_in_section(texture_header_blob_name, "texture_headers") + + linker.add_blob(texture_info_blob_name, texture_info_data) + linker.place_blob_in_section(texture_info_blob_name, "texture_infos") + return texture_info_blob_name + + @staticmethod + def from_blender_image(blender_image, blender_extension = 'REPEAT', format = None): + texture = TplTexture() + texture.size = tuple(blender_image.size) + + # Convert wrap mode + if blender_extension == 'REPEAT': + texture.wrap = "REPEAT" + elif blender_extension == 'EXTEND': + texture.wrap = "CLAMP" + else: + # clip is not supported + assert(False) + + # Store image data + channel_count = blender_image.channels + raw_pixel_data = tuple(blender_image.pixels) + pixel_count = len(raw_pixel_data) // channel_count + + # Convert N-channel pixels to fixed RGBA + pixels = [] + default_pixel_data = (0.0, 0.0, 0.0, 1.0) + for i in range(pixel_count): + # Get pixel dat from blender + pixel_start_offset = i * channel_count + pixel_end_offset = pixel_start_offset + min(channel_count, len(default_pixel_data)) + backed_pixel_data = blender_image.pixels[pixel_start_offset:pixel_end_offset] + + # Extend pixel data if the image has less than four channels. + extended_pixel_data = backed_pixel_data + #for j in range(len(backed_pixel_data), len(default_pixel_data)): + # extended_pixel_data.append(default_pixel_data[j]) + + #print(extended_pixel_data) + pixels.append(tuple(extended_pixel_data)) + + # Convert texture data to target format now to save memory + if format == None: + # This really ought to be CMPR, but the current DXT1 encoder is so + # slow that I'm using RGB5A3 for now for decent quality + format = "RGB5A3" + texture.format = format + texture.texture_data = encode_pixels(pixels, texture.size, format) + return texture + +class TplFile: + def __init__(self): + self.textures = [] + + def serialize(self): + linker = Linker() + + header_blob_name = "header" + header_data = bytearray(0xc) + struct.pack_into(">L", header_data, 0x0, 0x0020af30) # magic + struct.pack_into(">L", header_data, 0x4, len(self.textures)) # texture count + + linker.add_blob(header_blob_name, header_data) + linker.place_blob_in_section(header_blob_name, "header") + + for i, texture in enumerate(self.textures): + texture_blob_name = texture.link(linker) + if i == 0: + linker.add_relocation(header_blob_name, 0x8, texture_blob_name) + + # Place sections and serialize + linker.place_section("header") + linker.place_section("texture_infos") + linker.place_section("texture_headers") + linker.place_section("texture_data", 0x20) + assert(linker.resolve_relocations()) + data = linker.serialize() + return data \ No newline at end of file diff --git a/ttyd-tools/blender_io_ttyd/io_scene_ttyd/util.py b/ttyd-tools/blender_io_ttyd/io_scene_ttyd/util.py new file mode 100644 index 00000000..7b57bcd6 --- /dev/null +++ b/ttyd-tools/blender_io_ttyd/io_scene_ttyd/util.py @@ -0,0 +1,163 @@ +# SPDX-License-Identifier: GPL-3.0-or-later +# 
Copyright 2019 Linus S. (aka PistonMiner) + +from collections import OrderedDict +import struct + +def align_up(value, alignment): + return (value + alignment - 1) & ~(alignment - 1) + +def get_bbox(points): + if len(points) == 0: + return None + + dimensions = len(points[0]) + bbox_min = points[0] + bbox_max = points[0] + for point in points: + bbox_min = [min(bbox_min[i], point[i]) for i in range(dimensions)] + bbox_max = [max(bbox_max[i], point[i]) for i in range(dimensions)] + + return (bbox_min, bbox_max) + +def merge_bboxes(bboxes): + merged_bbox = None + for bbox in bboxes: + if merged_bbox == None: + merged_bbox = (list(bbox[0]), list(bbox[1])) + continue + for i in range(3): + merged_bbox[0][i] = min(merged_bbox[0][i], bbox[0][i]) + merged_bbox[1][i] = max(merged_bbox[1][i], bbox[1][i]) + return merged_bbox + +class Linker: + def __init__(self): + self.blob_data = {} + self.blob_addresses = {} + + self.section_addresses = {} + self.section_blobs = {} + + self.resolved_relocations = [] + self.unresolved_relocations = [] + + self.current_uid = 0 + + def place_blob_in_section(self, blob_name, section_name): + if not section_name in self.section_blobs: + self.section_blobs[section_name] = [] + self.section_blobs[section_name].append(blob_name) + + def get_blob_address(self, blob_name): + return self.blob_addresses[blob_name] if blob_name in self.blob_addresses else None + + def get_section_blob_count(self, section_name): + if section_name in self.section_blobs: + return len(self.section_blobs[section_name]) + else: + return 0 + + def place_section(self, section_name, align = 1): + if not section_name in self.section_blobs: + return + if not len(self.section_blobs[section_name]): + return + + next_free = 0 + for blob_name in self.blob_addresses: + blob_size = len(self.blob_data[blob_name]) + blob_end = self.blob_addresses[blob_name] + blob_size + next_free = max(next_free, blob_end) + + next_free = align_up(next_free, align) + self.place_section_at(section_name, next_free) + + def place_section_at(self, section_name, address): + current_address = address + for blob_name in self.section_blobs[section_name]: + self.blob_addresses[blob_name] = current_address + current_address += len(self.blob_data[blob_name]) + + def get_section_address(self, section_name): + if section_name not in self.section_blobs: + return 0 + + if len(self.section_blobs[section_name]) < 1: + return 0 + + first_blob = self.section_blobs[section_name][0] + assert(first_blob in self.blob_addresses) + return self.blob_addresses[first_blob] + + def blob_exists(self, blob_name): + return blob_name in self.blob_data + + def add_blob(self, name, data): + # Catch name collisions here + if name in self.blob_data: + print("Name collision on blob {}!".format(name)) + assert(False) + self.blob_data[name] = data + + def add_relocation(self, source_name, source_offset, target_name): + relocation = (source_name, source_offset, target_name) + self.unresolved_relocations.append(relocation) + + def get_uid(self): + self.current_uid += 1 + return self.current_uid + + def resolve_relocations(self): + while True: + resolved_any = False + i = 0 + while i < len(self.unresolved_relocations): + source_name, source_offset, target_name = self.unresolved_relocations[i] + if target_name in self.blob_addresses: + target_address = self.blob_addresses[target_name] + struct.pack_into(">L", self.blob_data[source_name], source_offset, target_address) + self.resolved_relocations.append(self.unresolved_relocations.pop(i)) + resolved_any = True + else: + 
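+					# Target blob has no address yet (its section has not been placed),
+					# so keep this relocation and retry it on a later pass of the outer loop.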
i += 1 + if not resolved_any: + break + + # Debug logging + if len(self.unresolved_relocations) != 0: + for source_name, source_offset, target_name in self.unresolved_relocations: + print("Unresolved relocation from {}+0x{:x} to {}".format(source_name, source_offset, target_name)) + + return len(self.unresolved_relocations) == 0 + + def dump_map(self): + entries = [] + for blob_name in self.blob_addresses: + address = self.blob_addresses[blob_name] + data = self.blob_data[blob_name] + entries.append((address, blob_name, len(data))) + + entries.sort(key=lambda x: x[0]) + + map_text = "Linker map:\n" + for address, name, size in entries: + map_text += "{:8x}: {} (size {})\n".format(address, name, size) + return map_text + + def serialize(self): + blocks_to_write = [] + for blob_name in self.blob_addresses: + address = self.blob_addresses[blob_name] + data = self.blob_data[blob_name] + blocks_to_write.append((address, data)) + blocks_to_write.sort(key=lambda x: x[0]) + + last_block = blocks_to_write[-1] + end_address = last_block[0] + len(last_block[1]) + + serialized_data = bytearray(end_address) + for address, data in blocks_to_write: + serialized_data[address:address + len(data)] = data + + return serialized_data \ No newline at end of file From 5dbde22f52c7916fabd8dba82775fffe7f9cd4c7 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Fri, 18 Oct 2019 00:08:59 +0200 Subject: [PATCH 28/90] blender_io_ttyd: Improve README --- ttyd-tools/blender_io_ttyd/README.md | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/ttyd-tools/blender_io_ttyd/README.md b/ttyd-tools/blender_io_ttyd/README.md index 1d7b8459..7df8f74b 100644 --- a/ttyd-tools/blender_io_ttyd/README.md +++ b/ttyd-tools/blender_io_ttyd/README.md @@ -51,16 +51,19 @@ Map object transform and texture coordinate transform animations are currently s ### Camera The Camera collection contains the camera marker meshes and curves that will end up in the camera road file (`c`). -A camera curve defines the path the camera should follow in order to track Mario and map to Blender curve objects. There is exactly one curve active at any one time. A curve is considered to be active when any of the markers attached to it is the one Mario is standing in. -Markers are meshes that define "zones" of which curves should be active depending on where Mario stands. The first marker mesh that is hit when projecting Mario's position downwards is considered active. This usually just means that you should put the marker slightly below where Mario should be standing in order to activate it. +A camera curve defines the path the camera should follow in order to track Mario and map to Blender curve objects. There is exactly one curve active at any one time. A curve is considered to be active when any of the markers attached to it is the one Mario is standing in. The actual curve should lie on the XZ-plane (XY in Blender). When the curve is active, the camera is positioned pointing orthogonally to the left-hand side of the curve and "slides" around the curve until it points at Mario. Effectively, this means that if you want to have the camera point forward, the curve should be going from left to right, so that the left side of the curve is facing forwards. + +Markers are meshes that define "zones" that define which curve should be active depending on where Mario stands. The first marker mesh that is hit when projecting Mario's position downwards is considered active. 
Effectively, this means that you should put the marker slightly below wherever Mario should be standing in order to activate it.
+
+Every curve must have at least one marker. To define the markers for a curve, add custom properties on the Blender object (the object data-block, **not** the Curve data-block) named "marker0", "marker1", etc. with the names of the marker objects for that curve. Only "marker0" is required.
+
+TODO: Other curve properties
 
 ## Known limitations
 * No support for:
-  * Lights
-  * Fog
-  * Different TEV modes
-  * Hit attributes
-  * Different texture formats (partially implemented)
-  * Material blend alpha (partially implemented)
\ No newline at end of file
+  * Lights
+  * Fog
+  * Different TEV modes
+  * Hit attributes
+  * Different texture formats (partially implemented)
+  * Material blend alpha (partially implemented)

From 28b3dfdb179b2b8fb3ebab9324089eeae3dd8431 Mon Sep 17 00:00:00 2001
From: PistonMiner
Date: Fri, 18 Oct 2019 18:40:47 +0200
Subject: [PATCH 29/90] blender_io_ttyd: Add note about using noclip.website for preview to README

---
 ttyd-tools/blender_io_ttyd/README.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/ttyd-tools/blender_io_ttyd/README.md b/ttyd-tools/blender_io_ttyd/README.md
index 7df8f74b..aa567e57 100644
--- a/ttyd-tools/blender_io_ttyd/README.md
+++ b/ttyd-tools/blender_io_ttyd/README.md
@@ -10,8 +10,11 @@ Copy the folder `io_scene_ttyd` in this directory into the `scripts/addons/` dir
 ## Playtesting
 To actually playtest your map in-game, you can use a tool like GCRebuilder to place the exported map files (`d`, `c`, `t`) in a subfolder of the disc image's `m` folder.
+
 You can either replace an existing map which you can then just go to in the game, or you can put it in as a completely new map. If you elect to go with the second option, you can use e.g. a REL GCI mod to load the map up dynamically, such as the one in the `rel` folder of this repository.
+For a quick preview, you can also use Jasper's [noclip.website](https://noclip.website) by selecting **just the `d` and `t` files** (not `c` or any others), and dragging and dropping them into the browser with the site open.
+
 ## Usage
 Different Blender collections are used to map objects to different functions in the map and must be selected accordingly when exporting. All collections must be present even if unused. Objects should not be in more than one of these functional collections.
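As a side note to the camera-marker setup described earlier, the "marker0"/"marker1" custom properties can also be assigned from Blender's Python console instead of the Properties panel. This is only a sketch: the object names `CameraCurve`, `CameraMarker0` and `CameraMarker1` are placeholders for whatever objects your scene actually contains, and it assumes Blender's standard `bpy` API.

```python
import bpy

# Placeholder names -- substitute the objects from your own scene.
curve_object = bpy.data.objects["CameraCurve"]

# Custom properties go on the curve *object*, not on its Curve data-block.
# "marker0" is required; add "marker1", "marker2", ... as needed.
curve_object["marker0"] = "CameraMarker0"
curve_object["marker1"] = "CameraMarker1"
```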
From cfe68d1026937f286a4507d301ca622a78cb9341 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Sun, 24 Nov 2019 18:16:33 +0100 Subject: [PATCH 30/90] elf2rel: Add support for REL file format version 1 and 2 --- ttyd-tools/elf2rel/elf2rel.cpp | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/ttyd-tools/elf2rel/elf2rel.cpp b/ttyd-tools/elf2rel/elf2rel.cpp index 88858807..d6162afa 100644 --- a/ttyd-tools/elf2rel/elf2rel.cpp +++ b/ttyd-tools/elf2rel/elf2rel.cpp @@ -39,6 +39,7 @@ std::map loadSymbolMap(const std::string &filename) } void writeModuleHeader(std::vector &buffer, + int version, int id, int sectionCount, int sectionInfoOffset, @@ -63,7 +64,7 @@ void writeModuleHeader(std::vector &buffer, save(buffer, sectionInfoOffset); save(buffer, 0); // name offset save(buffer, 0); // name size - save(buffer, 3); // version + save(buffer, version); // version save(buffer, totalBssSize); save(buffer, relocationOffset); @@ -76,9 +77,15 @@ void writeModuleHeader(std::vector &buffer, save(buffer, prologOffset); save(buffer, epilogOffset); save(buffer, unresolvedOffset); - save(buffer, maxAlign); - save(buffer, maxBssAlign); - save(buffer, fixedDataSize); + if (version >= 2) + { + save(buffer, maxAlign); + save(buffer, maxBssAlign); + } + if (version >= 3) + { + save(buffer, fixedDataSize); + } } void writeSectionInfo(std::vector &buffer, int offset, int size) @@ -117,6 +124,7 @@ int main(int argc, char **argv) std::string lstFilename; std::string relFilename = ""; int moduleID = 33; + int relVersion = 3; { namespace po = boost::program_options; @@ -127,7 +135,8 @@ int main(int argc, char **argv) ("input-file,i", po::value(&elfFilename), "Input ELF filename (required)") ("symbol-file,s", po::value(&lstFilename), "Input symbol file name (required)") ("output-file,o", po::value(&relFilename), "Output REL filename") - ("rel-id", po::value(&moduleID)->default_value(0x1000), "REL file ID"); + ("rel-id", po::value(&moduleID)->default_value(0x1000), "REL file ID") + ("rel-version", po::value(&relVersion)->default_value(3), "REL file format version (1, 2, 3)"); po::positional_options_description positionals; positionals.add("input-file", -1); @@ -144,7 +153,9 @@ int main(int argc, char **argv) if (varMap.count("help") || varMap.count("input-file") != 1 - || varMap.count("symbol-file") != 1) + || varMap.count("symbol-file") != 1 + || relVersion < 1 + || relVersion > 3) { std::cout << description << "\n"; return 1; @@ -217,7 +228,7 @@ int main(int argc, char **argv) std::vector outputBuffer; // Dummy values for header until offsets are determined - writeModuleHeader(outputBuffer, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + writeModuleHeader(outputBuffer, relVersion, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); int sectionInfoOffset = outputBuffer.size(); for (int i = 0; i < inputElf.sections.size(); ++i) { @@ -523,6 +534,7 @@ int main(int argc, char **argv) // Write final header std::vector headerBuffer; writeModuleHeader(headerBuffer, + relVersion, moduleID, inputElf.sections.size(), sectionInfoOffset, From b7479f6968837c574c6485b112bf5229a3180c4b Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Mon, 25 Nov 2019 22:01:43 +0100 Subject: [PATCH 31/90] rel: Add header and symbols for mapdata.o --- ttyd-tools/rel/include/ttyd.eu.lst | 6 ++++++ ttyd-tools/rel/include/ttyd.jp.lst | 6 ++++++ ttyd-tools/rel/include/ttyd.us.lst | 6 ++++++ ttyd-tools/rel/include/ttyd/mapdata.h | 19 +++++++++++++++++++ 4 files changed, 37 insertions(+) create mode 100644 
ttyd-tools/rel/include/ttyd/mapdata.h diff --git a/ttyd-tools/rel/include/ttyd.eu.lst b/ttyd-tools/rel/include/ttyd.eu.lst index b833bf2b..4c72f3b5 100644 --- a/ttyd-tools/rel/include/ttyd.eu.lst +++ b/ttyd-tools/rel/include/ttyd.eu.lst @@ -102,6 +102,12 @@ 8003023c:memClear 80030300:memInit +// mapdata.o +800390c0:relSetBtlAddr +80039170:relSetEvtAddr +800392c8:areaDataPtr +80039360:mapDataPtr + // mario.o 800562b8:toFrontPose 800567e8:toRearPose diff --git a/ttyd-tools/rel/include/ttyd.jp.lst b/ttyd-tools/rel/include/ttyd.jp.lst index 99dbc636..e51a763a 100644 --- a/ttyd-tools/rel/include/ttyd.jp.lst +++ b/ttyd-tools/rel/include/ttyd.jp.lst @@ -100,6 +100,12 @@ 8002faf0:memClear 8002fbb4:memInit +// mapdata.o +8003893c:relSetBtlAddr +800389ec:relSetEvtAddr +80038b44:areaDataPtr +80038bdc:mapDataPtr + // mario.o 800553ac:toFrontPose 800558c0:toRearPose diff --git a/ttyd-tools/rel/include/ttyd.us.lst b/ttyd-tools/rel/include/ttyd.us.lst index d4a6d6b0..22592a90 100644 --- a/ttyd-tools/rel/include/ttyd.us.lst +++ b/ttyd-tools/rel/include/ttyd.us.lst @@ -102,6 +102,12 @@ 80030154:memClear 80030218:memInit +// mapdata.o +80038fd8:relSetBtlAddr +80039088:relSetEvtAddr +800391e0:areaDataPtr +80039278:mapDataPtr + // mario.o 800561b8:toFrontPose 800566cc:toRearPose diff --git a/ttyd-tools/rel/include/ttyd/mapdata.h b/ttyd-tools/rel/include/ttyd/mapdata.h new file mode 100644 index 00000000..6abd918b --- /dev/null +++ b/ttyd-tools/rel/include/ttyd/mapdata.h @@ -0,0 +1,19 @@ +#pragma once + +#include "database.h" + +#include + +namespace ttyd::mapdata { + +extern "C" +{ + +void relSetBtlAddr(const char *areaName, const void *battleInfos, const database::DatabaseDefinition *nameToInfoIdTable); +void relSetEvtAddr(const char *mapName, const void *pInitEvtCode); +void **areaDataPtr(const char *areaName); +void **mapDataPtr(const char *mapName); + +} + +} \ No newline at end of file From 4e82eedb833fb12a4f663b660c474364386109f6 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Tue, 26 Nov 2019 18:35:49 +0100 Subject: [PATCH 32/90] rel: Add header and symbols for evtmgr.o --- ttyd-tools/rel/include/ttyd.eu.lst | 29 ++++++ ttyd-tools/rel/include/ttyd.jp.lst | 29 ++++++ ttyd-tools/rel/include/ttyd.us.lst | 29 ++++++ ttyd-tools/rel/include/ttyd/evtmgr.h | 129 +++++++++++++++++++++++++++ 4 files changed, 216 insertions(+) create mode 100644 ttyd-tools/rel/include/ttyd/evtmgr.h diff --git a/ttyd-tools/rel/include/ttyd.eu.lst b/ttyd-tools/rel/include/ttyd.eu.lst index 4c72f3b5..4d38e8eb 100644 --- a/ttyd-tools/rel/include/ttyd.eu.lst +++ b/ttyd-tools/rel/include/ttyd.eu.lst @@ -102,6 +102,35 @@ 8003023c:memClear 80030300:memInit +// evtmgr.o +80035b10:evtGetPtrID +80035b70:evtGetPtr +80035bac:evtStartOther +80035c34:evtStopOther +80035cbc:evtStartAll +80035d50:evtStopAll +80035de4:evtStartID +80035e84:evtStopID +80035f24:evtStart +800362f8:evtStop +800366cc:evtSetType +800366d4:evtSetSpeed +800366e4:evtSetPri +800366ec:evtCheckID +8003674c:evtDeleteID +800367e8:evtDelete +800369b8:evtmgrMain +80036ba4:evtRestart +80036cec:evtBrotherEntry +80036fd4:evtChildEntry +80037308:evtEntryType +80037594:evtEntry +// 80037820:evtEntryRunCheck +80037898:evtmgrReInit +80037904:evtmgrInit +// 80037a1c:make_pri_table +80037b48:evtGetWork + // mapdata.o 800390c0:relSetBtlAddr 80039170:relSetEvtAddr diff --git a/ttyd-tools/rel/include/ttyd.jp.lst b/ttyd-tools/rel/include/ttyd.jp.lst index e51a763a..c7d07e44 100644 --- a/ttyd-tools/rel/include/ttyd.jp.lst +++ b/ttyd-tools/rel/include/ttyd.jp.lst @@ -100,6 +100,35 @@ 
8002faf0:memClear 8002fbb4:memInit +// evtmgr.o +8003538c:evtGetPtrID +800353ec:evtGetPtr +80035428:evtStartOther +800354b0:evtStopOther +80035538:evtStartAll +800355cc:evtStopAll +80035660:evtStartID +80035700:evtStopID +800357a0:evtStart +80035b74:evtStop +80035f48:evtSetType +80035f50:evtSetSpeed +80035f60:evtSetPri +80035f68:evtCheckID +80035fc8:evtDeleteID +80036064:evtDelete +80036234:evtmgrMain +80036420:evtRestart +80036568:evtBrotherEntry +80036850:evtChildEntry +80036b84:evtEntryType +80036e10:evtEntry +// 8003709c:evtEntryRunCheck +80037114:evtmgrReInit +80037180:evtmgrInit +// 80037298:make_pri_table +800373c4:evtGetWork + // mapdata.o 8003893c:relSetBtlAddr 800389ec:relSetEvtAddr diff --git a/ttyd-tools/rel/include/ttyd.us.lst b/ttyd-tools/rel/include/ttyd.us.lst index 22592a90..ff8a28fc 100644 --- a/ttyd-tools/rel/include/ttyd.us.lst +++ b/ttyd-tools/rel/include/ttyd.us.lst @@ -102,6 +102,35 @@ 80030154:memClear 80030218:memInit +// evtmgr.o +80035a28:evtGetPtrID +80035a88:evtGetPtr +80035ac4:evtStartOther +80035b4c:evtStopOther +80035bd4:evtStartAll +80035c68:evtStopAll +80035cfc:evtStartID +80035d9c:evtStopID +80035e3c:evtStart +80036210:evtStop +800365e4:evtSetType +800365ec:evtSetSpeed +800365fc:evtSetPri +80036604:evtCheckID +80036664:evtDeleteID +80036700:evtDelete +800368d0:evtmgrMain +80036abc:evtRestart +80036c04:evtBrotherEntry +80036eec:evtChildEntry +80037220:evtEntryType +800374ac:evtEntry +// 80037738:evtEntryRunCheck +800377b0:evtmgrReInit +8003781c:evtmgrInit +// 80037934:make_pri_table +80037a60:evtGetWork + // mapdata.o 80038fd8:relSetBtlAddr 80039088:relSetEvtAddr diff --git a/ttyd-tools/rel/include/ttyd/evtmgr.h b/ttyd-tools/rel/include/ttyd/evtmgr.h new file mode 100644 index 00000000..945f711e --- /dev/null +++ b/ttyd-tools/rel/include/ttyd/evtmgr.h @@ -0,0 +1,129 @@ +#pragma once + +#include + +namespace ttyd::evtmgr { + +struct EvtEntry; + +struct EvtEntry +{ + int64_t timeSinceStart; + uint8_t flags; + int8_t paramCount; + uint8_t opcode; + int8_t executionOrder; + uint8_t typeMask; + bool sleeping; + uint8_t loopStackIndex; + uint8_t switchStackIndex; + int8_t wNpcEventType; + uint8_t pad_10[3]; + int32_t nextCommandPtr; + void *currentCommandArguments; + int8_t labelIdTable[16]; + void *labelAddressTable[16]; + EvtEntry *waitingEvt; + EvtEntry *waitingOnEvt; + EvtEntry *parentEvt; + int32_t sleepTimeMs; + int64_t sleepStartTime; + int32_t wInterpolationStartedNpcFlag; + int64_t wInterpRelatedTime; + int32_t unk_90; + int32_t unk_94; + int32_t currentSleepingFuncPtr; + int32_t lwData[16]; + uint32_t lfData; + int32_t unk_e0; + int32_t unk_e4; + void *loopStartAddressStack[8]; + int32_t loopIterationsLeftStack[8]; + int8_t switchStateStack[8]; + int32_t switchValueStack[8]; + void* memoryCmdBase; + void *uwBase; + void *ufBase; + int32_t threadId; + void *wActorThisPtr; + float timescale; + int32_t timeScheduledToRun; + int32_t unk_16c; + void *wThisPtr; + void *wThisObjPtr; + int32_t wActiveMsgWindowId; + int32_t unk_17c; + int32_t unk_180; + int32_t unk_184; + int32_t unk_188; + int32_t unk_18c; + int32_t msgPriority; + int32_t unk_194; + int64_t wInterpolationStartTime2; + void *restartFromLocation; + char *name; + void *wCurrentCommandPtr; + int32_t unk_1ac; +} __attribute__((__packed__)); + +static_assert(sizeof(EvtEntry) == 0x1b0); + +struct EvtWork +{ + int32_t entryCount; + uint32_t gwData[32]; + uint32_t gfData[3]; + EvtEntry *entries; + uint32_t pad_94; + int64_t currentEvtTime; +} __attribute__((__packed__)); + 
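+// gwData/gfData presumably back the global GW(n)/GF(n) script variables, and
+// entries presumably points to the pool of entryCount EvtEntry slots that
+// evtmgrMain iterates over.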
+static_assert(sizeof(EvtWork) == 0xa0); + +extern "C" { + +void evtmgrInit(); +void evtmgrReInit(); +void evtmgrMain(); + +EvtWork *evtGetWork(); + +EvtEntry *evtEntry(void *evtCode, int32_t executionOrder, uint32_t flags); +EvtEntry *evtEntryType(void *evtCode, int8_t priority, uint32_t flags, uint32_t types); +EvtEntry *evtChildEntry(EvtEntry *parentEvt, void *evtCode, uint32_t flags); +EvtEntry *evtBrotherEntry(EvtEntry *parentEvt, void *evtCode, uint32_t flags); +EvtEntry *evtRestart(EvtEntry *evt); +void evtDelete(EvtEntry *evt); +void evtDeleteID(int32_t threadId); +bool evtCheckID(int32_t threadId); +void evtSetPri(EvtEntry *evt, int32_t priority); +void evtSetSpeed(EvtEntry *evt, float timescale); +void evtSetType(EvtEntry *evt, uint8_t typeMask); +void evtStop(EvtEntry *evt, uint8_t typeMask); +void evtStart(EvtEntry *evt, uint8_t typeMask); +void evtStopID(int32_t threadId); +void evtStartID(int32_t threadId); +void evtStopAll(uint8_t typeMask); +void evtStartAll(uint8_t typeMask); +void evtStopOther(EvtEntry *evt, uint8_t typeMask); +void evtStartOther(EvtEntry *evt, uint8_t typeMask); +EvtEntry *evtGetPtr(int32_t index); +EvtEntry *evtGetPtrID(int32_t threadId); + +// Local functions +// void make_pri_table(); +// void evtEntryRunCheck(EvtEntry *evt); + +} + +#define EVT_DECLARE_USER_FUNC(name, parameter_count) \ + constexpr int name##_parameter_count = (parameter_count); \ + int32_t name(ttyd::evtmgr::EvtEntry *evt, bool isFirstCall); + +#define EVT_DEFINE_USER_FUNC(name) \ + int32_t name(ttyd::evtmgr::EvtEntry *evt, bool isFirstCall) + +#define EVT_DECLARE(name) \ + extern const int32_t name[]; + +} \ No newline at end of file From 0440d4950db6e7ade21d246fc861ae978124cdca Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Tue, 26 Nov 2019 18:37:52 +0100 Subject: [PATCH 33/90] rel: Add support for writing evts --- ttyd-tools/rel/include/evt_cmd.h | 360 +++++++++++++++++++++++++++++++ 1 file changed, 360 insertions(+) create mode 100644 ttyd-tools/rel/include/evt_cmd.h diff --git a/ttyd-tools/rel/include/evt_cmd.h b/ttyd-tools/rel/include/evt_cmd.h new file mode 100644 index 00000000..b7cfc6f5 --- /dev/null +++ b/ttyd-tools/rel/include/evt_cmd.h @@ -0,0 +1,360 @@ +#pragma once + +#include + +#define EVT_BEGIN(name) \ + const int32_t name[] = { +#define EVT_END() \ + 0x1 }; + +#define EVT_HELPER_OP(op) \ + reinterpret_cast((op)) + +// Expression types +#define EVT_HELPER_EXPR(base, offset) \ + EVT_HELPER_OP((base) + (offset)) + +#define EVT_HELPER_LW_BASE -30000000 +#define EVT_HELPER_GW_BASE -50000000 +#define EVT_HELPER_LF_BASE -70000000 +#define EVT_HELPER_GF_BASE -90000000 +#define EVT_HELPER_LSWF_BASE -110000000 +#define EVT_HELPER_GSWF_BASE -130000000 +#define EVT_HELPER_LSW_BASE -150000000 +#define EVT_HELPER_GSW_BASE -170000000 +#define EVT_HELPER_UW_BASE -190000000 +#define EVT_HELPER_UF_BASE -210000000 +#define EVT_HELPER_FLOAT_BASE -230000000 +#define EVT_HELPER_POINTER_BASE -250000000 + +#define LW(id) \ + EVT_HELPER_EXPR(EVT_HELPER_LW_BASE, id) +#define GW(id) \ + EVT_HELPER_EXPR(EVT_HELPER_GW_BASE, id) +#define LF(id) \ + EVT_HELPER_EXPR(EVT_HELPER_LF_BASE, id) +#define GF(id) \ + EVT_HELPER_EXPR(EVT_HELPER_GF_BASE, id) +#define LSWF(id) \ + EVT_HELPER_EXPR(EVT_HELPER_LSWF_BASE, id) +#define GSWF(id) \ + EVT_HELPER_EXPR(EVT_HELPER_GSWF_BASE, id) +#define LSW(id) \ + EVT_HELPER_EXPR(EVT_HELPER_LSW_BASE, id) +#define GSW(id) \ + EVT_HELPER_EXPR(EVT_HELPER_GSW_BASE, id) +#define UW(id) \ + EVT_HELPER_EXPR(EVT_HELPER_UW_BASE, id) +#define UF(id) \ + 
EVT_HELPER_EXPR(EVT_HELPER_UF_BASE, id) + +#define FLOAT(value) \ + EVT_HELPER_EXPR( \ + EVT_HELPER_FLOAT_BASE, static_cast((value) * 1024.f) \ + ) +#define PTR(value) \ + reinterpret_cast(value) + +// Commands +#define EVT_HELPER_CMD(parameter_count, opcode) \ + static_cast( \ + static_cast((parameter_count) << 16 | (opcode)) \ + ) + +#define RETURN() \ + EVT_HELPER_CMD(0, 2), + +#define LBL(id) \ + EVT_HELPER_CMD(1, 3), EVT_HELPER_OP(id), +#define GOTO(id) \ + EVT_HELPER_CMD(1, 4), EVT_HELPER_OP(id), + +#define DO(iteration_count) \ + EVT_HELPER_CMD(1, 5), EVT_HELPER_OP(iteration_count), +#define WHILE() \ + EVT_HELPER_CMD(0, 6), +#define DO_BREAK() \ + EVT_HELPER_CMD(0, 7), +#define DO_CONTINUE() \ + EVT_HELPER_CMD(0, 8), + +#define WAIT_FRM() \ + EVT_HELPER_CMD(1, 9), +#define WAIT_MSEC() \ + EVT_HELPER_CMD(1, 10), +#define HALT(until) \ + EVT_HELPER_CMD(1, 11), EVT_HELPER_OP(until), + +#define IF_STR_EQUAL(lhs, rhs) \ + EVT_HELPER_CMD(2, 12), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), +#define IF_STR_NOT_EQUAL(lhs, rhs) \ + EVT_HELPER_CMD(2, 13), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), +#define IF_STR_SMALL(lhs, rhs) \ + EVT_HELPER_CMD(2, 14), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), +#define IF_STR_LARGE(lhs, rhs) \ + EVT_HELPER_CMD(2, 15), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), +#define IF_STR_SMALL_EQUAL(lhs, rhs) \ + EVT_HELPER_CMD(2, 16), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), +#define IF_STR_LARGE_EQUAL(lhs, rhs) \ + EVT_HELPER_CMD(2, 17), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), + +#define IFF_EQUAL(lhs, rhs) \ + EVT_HELPER_CMD(2, 18), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), +#define IFF_NOT_EQUAL(lhs, rhs) \ + EVT_HELPER_CMD(2, 19), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), +#define IFF_SMALL(lhs, rhs) \ + EVT_HELPER_CMD(2, 20), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), +#define IFF_LARGE(lhs, rhs) \ + EVT_HELPER_CMD(2, 21), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), +#define IFF_SMALL_EQUAL(lhs, rhs) \ + EVT_HELPER_CMD(2, 22), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), +#define IFF_LARGE_EQUAL(lhs, rhs) \ + EVT_HELPER_CMD(2, 23), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), + +#define IF_EQUAL(lhs, rhs) \ + EVT_HELPER_CMD(2, 24), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), +#define IF_NOT_EQUAL(lhs, rhs) \ + EVT_HELPER_CMD(2, 25), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), +#define IF_SMALL(lhs, rhs) \ + EVT_HELPER_CMD(2, 26), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), +#define IF_LARGE(lhs, rhs) \ + EVT_HELPER_CMD(2, 27), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), +#define IF_SMALL_EQUAL(lhs, rhs) \ + EVT_HELPER_CMD(2, 28), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), +#define IF_LARGE_EQUAL(lhs, rhs) \ + EVT_HELPER_CMD(2, 29), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), + +#define IF_FLAG(val, mask) \ + EVT_HELPER_CMD(2, 30), EVT_HELPER_OP(val), reinterpret_cast(mask), +#define IF_NOT_FLAG(val, mask) \ + EVT_HELPER_CMD(2, 31), EVT_HELPER_OP(val), reinterpret_cast(mask), + +#define ELSE() \ + EVT_HELPER_CMD(0, 32), +#define END_IF() \ + EVT_HELPER_CMD(0, 33), + +#define SWITCH(val) \ + EVT_HELPER_CMD(1, 34), EVT_HELPER_OP(val) +#define SWITCHI(val) \ + EVT_HELPER_CMD(1, 35), EVT_HELPER_OP(val) + +#define CASE_EQUAL(val) \ + EVT_HELPER_CMD(1, 36), EVT_HELPER_OP(val), +#define CASE_NOT_EQUAL(val) \ + EVT_HELPER_CMD(1, 37), EVT_HELPER_OP(val), +#define CASE_SMALL(val) \ + EVT_HELPER_CMD(1, 38), EVT_HELPER_OP(val), +#define CASE_LARGE(val) \ + EVT_HELPER_CMD(1, 39), EVT_HELPER_OP(val), +#define CASE_SMALL_EQUAL(val) \ + EVT_HELPER_CMD(1, 40), EVT_HELPER_OP(val), +#define CASE_LARGE_EQUAL(val) \ + EVT_HELPER_CMD(1, 
41), EVT_HELPER_OP(val), + +#define CASE_ETC() \ + EVT_HELPER_CMD(0, 42), +#define CASE_OR(val) \ + EVT_HELPER_CMD(1, 43), EVT_HELPER_OP(val), +#define CASE_AND(val) \ + EVT_HELPER_CMD(1, 44), EVT_HELPER_OP(val), +#define CASE_FLAG(mask) \ + EVT_HELPER_CMD(1, 45), reinterpret_cast(mask), +#define CASE_END() \ + EVT_HELPER_CMD(0, 46), +#define CASE_BETWEEN(low, high) \ + EVT_HELPER_CMD(2, 47), EVT_HELPER_OP(low), EVT_HELPER_OP(high), + +#define SWITCH_BREAK() \ + EVT_HELPER_CMD(0, 48), +#define END_SWITCH() \ + EVT_HELPER_CMD(0, 49), + +#define SET(lhs, rhs) \ + EVT_HELPER_CMD(2, 50), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), +#define SETI(lhs, rhs) \ + EVT_HELPER_CMD(2, 51), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), +#define SETF(lhs, rhs) \ + EVT_HELPER_CMD(2, 52), EVT_HELPER_OP(lhs), EVT_HELPER_OP(rhs), + +#define ADD(out, lhs, rhs) \ + EVT_HELPER_CMD(3, 53), EVT_HELPER_OP(out), EVT_HELPER_OP(lhs), \ + EVT_HELPER_OP(rhs), +#define SUB(out, lhs, rhs) \ + EVT_HELPER_CMD(3, 54), EVT_HELPER_OP(out), EVT_HELPER_OP(lhs), \ + EVT_HELPER_OP(rhs), +#define MUL(out, lhs, rhs) \ + EVT_HELPER_CMD(3, 55), EVT_HELPER_OP(out), EVT_HELPER_OP(lhs), \ + EVT_HELPER_OP(rhs), +#define DIV(out, lhs, rhs) \ + EVT_HELPER_CMD(3, 56), EVT_HELPER_OP(out), EVT_HELPER_OP(lhs), \ + EVT_HELPER_OP(rhs), +#define MOD(out, lhs, rhs) \ + EVT_HELPER_CMD(3, 57), EVT_HELPER_OP(out), EVT_HELPER_OP(lhs), \ + EVT_HELPER_OP(rhs), + +#define ADDF(out, lhs, rhs) \ + EVT_HELPER_CMD(3, 58), EVT_HELPER_OP(out), EVT_HELPER_OP(lhs), \ + EVT_HELPER_OP(rhs), +#define SUBF(out, lhs, rhs) \ + EVT_HELPER_CMD(3, 59), EVT_HELPER_OP(out), EVT_HELPER_OP(lhs), \ + EVT_HELPER_OP(rhs), +#define MULF(out, lhs, rhs) \ + EVT_HELPER_CMD(3, 60), EVT_HELPER_OP(out), EVT_HELPER_OP(lhs), \ + EVT_HELPER_OP(rhs), +#define DIVF(out, lhs, rhs) \ + EVT_HELPER_CMD(3, 61), EVT_HELPER_OP(out), EVT_HELPER_OP(lhs), \ + EVT_HELPER_OP(rhs), + +#define SET_READ(val) \ + EVT_HELPER_CMD(1, 62), EVT_HELPER_OP(val), +#define READ(out) \ + EVT_HELPER_CMD(1, 63), EVT_HELPER_OP(out), +#define READ2(out1, out2) \ + EVT_HELPER_CMD(2, 64), EVT_HELPER_OP(out1), EVT_HELPER_OP(out2), +#define READ3(out1, out2, out3) \ + EVT_HELPER_CMD(3, 65), EVT_HELPER_OP(out1), EVT_HELPER_OP(out2), \ + EVT_HELPER_OP(out3), +#define READ4(out1, out2, out3, out4) \ + EVT_HELPER_CMD(4, 66), EVT_HELPER_OP(out1), EVT_HELPER_OP(out2), \ + EVT_HELPER_OP(out3), EVT_HELPER_OP(out4), +#define READ_N(out, index) \ + EVT_HELPER_CMD(2, 67), EVT_HELPER_OP(out), EVT_HELPER_OP(index), + +#define SET_READF(val) \ + EVT_HELPER_CMD(1, 68), EVT_HELPER_OP(val), +#define READF(out) \ + EVT_HELPER_CMD(1, 69), EVT_HELPER_OP(out), +#define READF2(out1, out2) \ + EVT_HELPER_CMD(2, 70), EVT_HELPER_OP(out1), EVT_HELPER_OP(out2), +#define READF3(out1, out2, out3) \ + EVT_HELPER_CMD(3, 71), EVT_HELPER_OP(out1), EVT_HELPER_OP(out2), \ + EVT_HELPER_OP(out3), +#define READF4(out1, out2, out3, out4) \ + EVT_HELPER_CMD(4, 72), EVT_HELPER_OP(out1), EVT_HELPER_OP(out2), \ + EVT_HELPER_OP(out3), EVT_HELPER_OP(out4), +#define READF_N(out, index) \ + EVT_HELPER_CMD(2, 73), EVT_HELPER_OP(out), EVT_HELPER_OP(index), + +#define SET_USER_WRK(val) \ + EVT_HELPER_CMD(1, 74), EVT_HELPER_OP(val), +#define SET_USER_FLG(val) \ + EVT_HELPER_CMD(1, 75), EVT_HELPER_OP(val), +#define ALLOC_USER_WRK(count) \ + EVT_HELPER_CMD(1, 76), EVT_HELPER_OP(count), + +#define AND(out, lhs, rhs) \ + EVT_HELPER_CMD(3, 77), EVT_HELPER_OP(out), EVT_HELPER_OP(lhs), \ + EVT_HELPER_OP(rhs), +#define ANDI(out, lhs, rhs) \ + EVT_HELPER_CMD(3, 78), EVT_HELPER_OP(out), 
EVT_HELPER_OP(lhs), \ + EVT_HELPER_OP(rhs), +#define OR(out, lhs, rhs) \ + EVT_HELPER_CMD(3, 79), EVT_HELPER_OP(out), EVT_HELPER_OP(lhs), \ + EVT_HELPER_OP(rhs), +#define ORI(out, lhs, rhs) \ + EVT_HELPER_CMD(3, 80), EVT_HELPER_OP(out), EVT_HELPER_OP(lhs), \ + EVT_HELPER_OP(rhs), + +#define SET_FRAME_FROM_MSEC(out, in) \ + EVT_HELPER_CMD(2, 81), EVT_HELPER_OP(out), EVT_HELPER_OP(in), +#define SET_MSEC_FROM_FRAME(out, in) \ + EVT_HELPER_CMD(2, 82), EVT_HELPER_OP(out), EVT_HELPER_OP(in), +#define SET_RAM(val, ptr) \ + EVT_HELPER_CMD(2, 83), EVT_HELPER_OP(val), EVT_HELPER_OP(ptr), +#define SET_RAMF(val, ptr) \ + EVT_HELPER_CMD(2, 84), EVT_HELPER_OP(val), EVT_HELPER_OP(ptr), +#define GET_RAM(val, ptr) \ + EVT_HELPER_CMD(2, 85), EVT_HELPER_OP(out), EVT_HELPER_OP(ptr), +#define GET_RAMF(val, ptr) \ + EVT_HELPER_CMD(2, 86), EVT_HELPER_OP(out), EVT_HELPER_OP(ptr), + +// R is short for Reg +#define SETR(indirect, val) \ + EVT_HELPER_CMD(2, 87), EVT_HELPER_OP(indirect), EVT_HELPER_OP(val), +#define SETRF(indirect, val) \ + EVT_HELPER_CMD(2, 88), EVT_HELPER_OP(indirect), EVT_HELPER_OP(val), +#define GETR(indirect, out) \ + EVT_HELPER_CMD(2, 89), EVT_HELPER_OP(indirect), EVT_HELPER_OP(out), +#define GETRF(indirect, out) \ + EVT_HELPER_CMD(2, 90), EVT_HELPER_OP(indirect), EVT_HELPER_OP(out), + +// User function calls with validated parameter counts +template +class expression_assert +{ + static_assert(expression); +}; +using evt_helper_int_array = int32_t[]; +#define EVT_HELPER_NUM_ARGS(...) \ + (sizeof(evt_helper_int_array{ __VA_ARGS__ }) / sizeof(int32_t)) +#define USER_FUNC(function, ...) \ + ( \ + expression_assert< \ + function##_parameter_count == -1 \ + || function##_parameter_count == EVT_HELPER_NUM_ARGS(__VA_ARGS__) \ + >(), \ + EVT_HELPER_CMD(1 + EVT_HELPER_NUM_ARGS(__VA_ARGS__), 91) \ + ), \ + reinterpret_cast(function), \ + ##__VA_ARGS__ , + +#define RUN_EVT(evt) \ + EVT_HELPER_CMD(1, 92), EVT_HELPER_OP(evt), +#define RUN_EVT_ID(evt, out_id) \ + EVT_HELPER_CMD(2, 93), EVT_HELPER_OP(evt), EVT_HELPER_OP(out_id), +#define RUN_CHILD_EVT(evt) \ + EVT_HELPER_CMD(1, 94), EVT_HELPER_OP(evt), +#define DELETE_EVT(evt_id) \ + EVT_HELPER_CMD(1, 95), EVT_HELPER_OP(evt_id), +#define RESTART_EVT(evt) \ + EVT_HELPER_CMD(1, 96), EVT_HELPER_OP(evt), + +#define SET_PRI(pri) \ + EVT_HELPER_CMD(1, 97), EVT_HELPER_OP(pri), +#define SET_SPD(spd) \ + EVT_HELPER_CMD(1, 98), EVT_HELPER_OP(spd), +#define SET_TYPE(type_mask) \ + EVT_HELPER_CMD(1, 99), EVT_HELPER_OP(type_mask), + +#define STOP_ALL(type_mask) \ + EVT_HELPER_CMD(1, 100), EVT_HELPER_OP(type_mask), +#define START_ALL(type_mask) \ + EVT_HELPER_CMD(1, 101), EVT_HELPER_OP(type_mask), +#define STOP_OTHER(type_mask) \ + EVT_HELPER_CMD(1, 102), EVT_HELPER_OP(type_mask), +#define START_OTHER(type_mask) \ + EVT_HELPER_CMD(1, 103), EVT_HELPER_OP(type_mask), +#define STOP_ID(evt_id) \ + EVT_HELPER_CMD(1, 104), EVT_HELPER_OP(evt_id), +#define START_ID(evt_id) \ + EVT_HELPER_CMD(1, 105), EVT_HELPER_OP(evt_id), +#define CHK_EVT(evt_id) \ + EVT_HELPER_CMD(1, 106), EVT_HELPER_OP(evt_id), + +#define INLINE_EVT() \ + EVT_HELPER_CMD(0, 107), +#define INLINE_EVT_ID(out_id) \ + EVT_HELPER_CMD(1, 108), EVT_HELPER_OP(out_id), +#define END_INLINE() \ + EVT_HELPER_CMD(0, 109), + +#define BROTHER_EVT() \ + EVT_HELPER_CMD(0, 110), +#define BROTHER_EVT_ID(out_id) \ + EVT_HELPER_CMD(1, 111), EVT_HELPER_OP(out_id), +#define END_BROTHER() \ + EVT_HELPER_CMD(0, 112), + +#define DEBUG_PUT_MSG(msg) \ + EVT_HELPER_CMD(1, 113), EVT_HELPER_OP(msg) +#define DEBUG_MSG_CLEAR(msg) \ + 
EVT_HELPER_CMD(0, 114), +#define DEBUG_PUT_REG(reg) \ + EVT_HELPER_CMD(0, 115), EVT_HELPER_OP(reg), +#define DEBUG_NAME(name) \ + EVT_HELPER_CMD(1, 116), EVT_HELPER_OP(name), +#define DEBUG_REM(text) \ + EVT_HELPER_CMD(1, 117), EVT_HELPER_OP(text), +#define DEBUG_BP(text) \ + EVT_HELPER_CMD(0, 118), From c1ded4bb518ae6edd4b46e30863d0be45a8e91dc Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Tue, 26 Nov 2019 19:10:26 +0100 Subject: [PATCH 34/90] rel: Add header and symbols for evt_map.o --- ttyd-tools/rel/include/ttyd.eu.lst | 31 +++++++++ ttyd-tools/rel/include/ttyd.jp.lst | 31 +++++++++ ttyd-tools/rel/include/ttyd.us.lst | 31 +++++++++ ttyd-tools/rel/include/ttyd/evt_map.h | 97 +++++++++++++++++++++++++++ 4 files changed, 190 insertions(+) create mode 100644 ttyd-tools/rel/include/ttyd/evt_map.h diff --git a/ttyd-tools/rel/include/ttyd.eu.lst b/ttyd-tools/rel/include/ttyd.eu.lst index 4d38e8eb..5227b26f 100644 --- a/ttyd-tools/rel/include/ttyd.eu.lst +++ b/ttyd-tools/rel/include/ttyd.eu.lst @@ -131,6 +131,37 @@ // 80037a1c:make_pri_table 80037b48:evtGetWork +// evt_map.o +80037d74:evt_mapdisp_onoff +80037db4:evt_map_replace_mapobj +80037e8c:evt_map_entry_airport_harbor +80038028:check2 +800380b4:check +80038140:evt_map_get_flush_color +800381d4:evt_map_set_flush_color +8003829c:evt_map_set_flush_onoff +80038338:evt_map_set_tevcallback +80038398:evt_mapobj_get_position +80038464:evt_mapobj_clear_offscreen +800384c8:evt_mapobj_set_offscreen +80038550:evt_mapobj_flag_onoff +800385f8:evt_map_set_playrate +80038650:evt_map_replayanim +800386b4:evt_map_pauseanim +80038718:evt_map_checkanim +80038790:evt_map_playanim +8003880c:evt_mapobj_color +800388fc:evt_map_blend_set_mobj_flag +80038998:evt_map_blend_set_flag +80038b70:evt_map_blend_off +80038bb0:evt_map_set_blend +80038c84:evt_map_fog_onoff +80038cc4:evt_map_get_fog +80038d70:evt_map_set_fog +80038e50:evt_mapobj_scale +80038f20:evt_mapobj_rotate +80038ff0:evt_mapobj_trans + // mapdata.o 800390c0:relSetBtlAddr 80039170:relSetEvtAddr diff --git a/ttyd-tools/rel/include/ttyd.jp.lst b/ttyd-tools/rel/include/ttyd.jp.lst index c7d07e44..82b705a0 100644 --- a/ttyd-tools/rel/include/ttyd.jp.lst +++ b/ttyd-tools/rel/include/ttyd.jp.lst @@ -129,6 +129,37 @@ // 80037298:make_pri_table 800373c4:evtGetWork +// evt_map.o +800375f0:evt_mapdisp_onoff +80037630:evt_map_replace_mapobj +80037708:evt_map_entry_airport_harbor +800378a4:check2 +80037930:check +800379bc:evt_map_get_flush_color +80037a50:evt_map_set_flush_color +80037b18:evt_map_set_flush_onoff +80037bb4:evt_map_set_tevcallback +80037c14:evt_mapobj_get_position +80037ce0:evt_mapobj_clear_offscreen +80037d44:evt_mapobj_set_offscreen +80037dcc:evt_mapobj_flag_onoff +80037e74:evt_map_set_playrate +80037ecc:evt_map_replayanim +80037f30:evt_map_pauseanim +80037f94:evt_map_checkanim +8003800c:evt_map_playanim +80038088:evt_mapobj_color +80038178:evt_map_blend_set_mobj_flag +80038214:evt_map_blend_set_flag +800383ec:evt_map_blend_off +8003842c:evt_map_set_blend +80038500:evt_map_fog_onoff +80038540:evt_map_get_fog +800385ec:evt_map_set_fog +800386cc:evt_mapobj_scale +8003879c:evt_mapobj_rotate +8003886c:evt_mapobj_trans + // mapdata.o 8003893c:relSetBtlAddr 800389ec:relSetEvtAddr diff --git a/ttyd-tools/rel/include/ttyd.us.lst b/ttyd-tools/rel/include/ttyd.us.lst index ff8a28fc..f57df074 100644 --- a/ttyd-tools/rel/include/ttyd.us.lst +++ b/ttyd-tools/rel/include/ttyd.us.lst @@ -131,6 +131,37 @@ // 80037934:make_pri_table 80037a60:evtGetWork +// evt_map.o +80037c8c:evt_mapdisp_onoff 
+80037ccc:evt_map_replace_mapobj +80037da4:evt_map_entry_airport_harbor +80037f40:check2 +80037fcc:check +80038058:evt_map_get_flush_color +800380ec:evt_map_set_flush_color +800381b4:evt_map_set_flush_onoff +80038250:evt_map_set_tevcallback +800382b0:evt_mapobj_get_position +8003837c:evt_mapobj_clear_offscreen +800383e0:evt_mapobj_set_offscreen +80038468:evt_mapobj_flag_onoff +80038510:evt_map_set_playrate +80038568:evt_map_replayanim +800385cc:evt_map_pauseanim +80038630:evt_map_checkanim +800386a8:evt_map_playanim +80038724:evt_mapobj_color +80038814:evt_map_blend_set_mobj_flag +800388b0:evt_map_blend_set_flag +80038a88:evt_map_blend_off +80038ac8:evt_map_set_blend +80038b9c:evt_map_fog_onoff +80038bdc:evt_map_get_fog +80038c88:evt_map_set_fog +80038d68:evt_mapobj_scale +80038e38:evt_mapobj_rotate +80038f08:evt_mapobj_trans + // mapdata.o 80038fd8:relSetBtlAddr 80039088:relSetEvtAddr diff --git a/ttyd-tools/rel/include/ttyd/evt_map.h b/ttyd-tools/rel/include/ttyd/evt_map.h new file mode 100644 index 00000000..26066a7f --- /dev/null +++ b/ttyd-tools/rel/include/ttyd/evt_map.h @@ -0,0 +1,97 @@ +#include + +#include "evtmgr.h" + +namespace ttyd::evt_map { + +extern "C" { + +// evt_mapobj_trans(int unused, char *name, float x, float y, float z) +EVT_DECLARE_USER_FUNC(evt_mapobj_trans, 5) + +// evt_mapobj_rotate(int unused, char *name, float x, float y, float z): +EVT_DECLARE_USER_FUNC(evt_mapobj_rotate, 5) + +// evt_mapobj_scale(int unused, char *name, float x, float y, float z) +EVT_DECLARE_USER_FUNC(evt_mapobj_scale, 5) + +// evt_map_set_fog(int mode, float start, float end, int r, int g, int b) +EVT_DECLARE_USER_FUNC(evt_map_set_fog, 6) + +// evt_map_get_fog(int &mode, float &start, float &end, int &r, int &g, int &b) +EVT_DECLARE_USER_FUNC(evt_map_get_fog, 6) + +// evt_map_fog_onoff(int on) +EVT_DECLARE_USER_FUNC(evt_map_fog_onoff, 1) + +// evt_map_set_blend(int use_blend2, int r, int g, int b, int a) +EVT_DECLARE_USER_FUNC(evt_map_set_blend, 5) + +// evt_map_blend_off(int use_blend2) +EVT_DECLARE_USER_FUNC(evt_map_blend_off, 2) + +// evt_map_set_flag(int on, char *name, int flags) +EVT_DECLARE_USER_FUNC(evt_map_set_flag, 3) + +// evt_map_set_mobj_flag(int on, char *name, int flags) +EVT_DECLARE_USER_FUNC(evt_map_set_mobj_flag, 3) + +// evt_mapobj_color(int use_group, char *name, int r, int g, int b, int a) +EVT_DECLARE_USER_FUNC(evt_mapobj_color, 6) + +// evt_map_playanim(char *name, int w_time_mode, int w_clock) +EVT_DECLARE_USER_FUNC(evt_map_playanim, 3) + +// evt_map_checkanim(char *name, int &done, int &ms_left) +EVT_DECLARE_USER_FUNC(evt_map_checkanim, 3) + +// evt_map_pauseanim(int pause_all, char *name) +EVT_DECLARE_USER_FUNC(evt_map_pauseanim, 2) + +// evt_map_replayanim(int replay_all, char *name) +EVT_DECLARE_USER_FUNC(evt_map_replayanim, 2) + +// evt_map_set_playrate(char *name, float rate) +EVT_DECLARE_USER_FUNC(evt_map_set_playrate, 2) + +// evt_mapobj_flag_onoff(int use_group, int on, char *name, raw mask) +EVT_DECLARE_USER_FUNC(evt_mapobj_flag_onoff, 4) + +// evt_mapobj_set_offscreen(int use_group, char *mapobj_name, char *offscreen_name) +EVT_DECLARE_USER_FUNC(evt_mapobj_set_offscreen, 3) + +// evt_mapobj_clear_offscreen(int use_group, char *mapobj_name) +EVT_DECLARE_USER_FUNC(evt_mapobj_clear_offscreen, 2) + +// evt_mapobj_get_position(char *name, int &x, int &y, int &z) +EVT_DECLARE_USER_FUNC(evt_mapobj_get_position, 4) + +// evt_map_set_tevcallback(int index, void (*callback)(MapTevCallbackInfo *)) +EVT_DECLARE_USER_FUNC(evt_map_set_tevcallback, 2) + +// 
evt_map_set_flush_onoff(int use_group, int on, char *mapobj_name) +EVT_DECLARE_USER_FUNC(evt_map_set_flush_onoff, 3) + +// evt_map_set_flush_color(int use_group, char *mapobj_name, int r, int g, int b, int a) +EVT_DECLARE_USER_FUNC(evt_map_set_flush_color, 6) + +// evt_map_get_flush_color(char *mapobj_name, int &r, int &g, int &b, int &a) +EVT_DECLARE_USER_FUNC(evt_map_get_flush_color, 5) + +// check(int &is_riding) +// Checks if the player is riding the mapobj with the name passed in LW(1). +EVT_DECLARE_USER_FUNC(check, 1) + +// check2(int &is_not_riding) +// Checks if the player is not riding the mapobj with the nammed passed in LW(1). +EVT_DECLARE_USER_FUNC(check2, 1) + +// evt_map_entry_airport_harbor(int mode, char *mapobj_name, int w_unknown) +EVT_DECLARE_USER_FUNC(evt_map_entry_airport_harbor, 3) + +// evt_map_replace_mapobj(char *mapobj_name, int mode) +EVT_DECLARE_USER_FUNC(evt_map_replace_mapobj, 2) + +} + +} \ No newline at end of file From f5a6744f354ea8798735085f6ef5a7af9515d7e5 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Wed, 27 Nov 2019 23:45:47 +0100 Subject: [PATCH 35/90] rel: Add missing header guard in evt_map.h --- ttyd-tools/rel/include/ttyd/evt_map.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ttyd-tools/rel/include/ttyd/evt_map.h b/ttyd-tools/rel/include/ttyd/evt_map.h index 26066a7f..5e8fbca9 100644 --- a/ttyd-tools/rel/include/ttyd/evt_map.h +++ b/ttyd-tools/rel/include/ttyd/evt_map.h @@ -1,3 +1,5 @@ +#pragma once + #include #include "evtmgr.h" From 944d6075382f03158fadec4b672aed4b5a914c00 Mon Sep 17 00:00:00 2001 From: PistonMiner Date: Wed, 27 Nov 2019 23:46:54 +0100 Subject: [PATCH 36/90] docs: Add original developer mnemonics to evt reference --- ttyd-tools/docs/ttyd-opc-summary.txt | 516 +++++++++++++-------------- 1 file changed, 258 insertions(+), 258 deletions(-) diff --git a/ttyd-tools/docs/ttyd-opc-summary.txt b/ttyd-tools/docs/ttyd-opc-summary.txt index 70c377b4..5f341b0e 100644 --- a/ttyd-tools/docs/ttyd-opc-summary.txt +++ b/ttyd-tools/docs/ttyd-opc-summary.txt @@ -1,258 +1,258 @@ -| Hex| Dec | Mnemonic | Summary -+----+-----+------------------------+------------------------------------- -| 00 | 0 | | Internal - fetch next instruction -| 01 | 1 | end | Script End -| 02 | 2 | return | Return -| 03 | 3 |