-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdb2.lua
More file actions
352 lines (337 loc) · 10.4 KB
/
db2.lua
File metadata and controls
352 lines (337 loc) · 10.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
-- db2.lua: reader for World of Warcraft client database (DB2) files in
-- the WDC4/WDC5 format. Binary layouts are declared with the vstruct
-- library; '<' selects little-endian for every field that follows.
local vstruct = require('vstruct')
-- Common file header, read immediately after the format magic (and, for
-- WDC5, after its extra preamble). Field meanings are validated against
-- the caller-supplied schema in rows() below.
local header = vstruct.compile([[<
record_count: u4
field_count: u4
record_size: u4
string_table_size: u4
table_hash: u4
layout_hash: u4
min_id: u4
max_id: u4
locale: u4
flags: { [ 2 | x13 ignore_id_index: b1 collectable: b1 has_offset_map: b1 ] }
id_index: u2
total_field_count: u4
bitpacked_data_offset: u4
lookup_column_count: u4
field_storage_info_size: u4
common_data_size: u4
pallet_data_size: u4
section_count: u4
]])
-- Per-section header; the file contains header.section_count of these
-- back to back. tact_key_hash is an 8-byte string: all zeroes marks an
-- unencrypted section (see the assertions in rows()).
local section_header = vstruct.compile([[<
tact_key_hash: s8
file_offset: u4
record_count: u4
string_table_size: u4
offset_records_end: u4
id_list_size: u4
relationship_data_size: u4
offset_map_id_count: u4
copy_table_count: u4
]])
-- Per-field storage descriptor, 24 bytes each (rows() asserts
-- total_field_count * 24 == field_storage_info_size). cx1..cx3 are
-- storage-type-dependent extras: cx1 is used as the default value for
-- common-data fields (type 2) and cx3 as the array arity for indexed
-- pallet arrays (type 4); other meanings are unused here.
local field_storage_info = vstruct.compile([[<
field_offset_bits: u2
field_size_bits: u2
additional_data_size: u4
storage_type: u4
cx1: u4
cx2: u4
cx3: u4
]])
-- Cache hot string functions as locals: upvalue access is cheaper than a
-- global table lookup in the per-record inner loops.
local strbyte = string.byte
local strfind = string.find
local strsub = string.sub

--- Read a 1-byte unsigned integer at 0-based `offset`.
local function u1(content, offset)
  return strbyte(content, offset + 1)
end

--- Read a 2-byte little-endian unsigned integer at 0-based `offset`.
local function u2(content, offset)
  local b0, b1 = strbyte(content, offset + 1, offset + 2)
  return b1 * 256 + b0
end

--- Read a 3-byte little-endian unsigned integer at 0-based `offset`.
local function u3(content, offset)
  local b0, b1, b2 = strbyte(content, offset + 1, offset + 3)
  return (b2 * 256 + b1) * 256 + b0
end

--- Read a 4-byte little-endian unsigned integer at 0-based `offset`.
local function u4(content, offset)
  local b0, b1, b2, b3 = strbyte(content, offset + 1, offset + 4)
  return ((b3 * 256 + b2) * 256 + b1) * 256 + b0
end

--- Read a 5-byte little-endian unsigned integer at 0-based `offset`.
-- 40 bits is still represented exactly by a Lua number (double).
local function u5(content, offset)
  local b0, b1, b2, b3, b4 = strbyte(content, offset + 1, offset + 5)
  return (((b4 * 256 + b3) * 256 + b2) * 256 + b1) * 256 + b0
end

-- Width-in-bytes -> reader function, for picking a reader dynamically.
local un = { u1, u2, u3, u4, u5 }

--- Extract the NUL-terminated string starting at 0-based `offset`.
-- Errors if no terminator exists (plain find, not a pattern match).
local function z(content, offset)
  local stop = assert(strfind(content, '\0', offset + 1, true))
  return strsub(content, offset + 1, stop - 1)
end

--- Floor division (kept 5.1-compatible; no `//` operator assumed).
local function div(a, b)
  return math.floor(a / b)
end

-- An all-zero TACT key hash marks an unencrypted section.
local zerohash = string.rep('\0', 8)
--- Build an iterator over every record in a WDC4/WDC5 db2 file.
-- @param content the entire db2 file as a binary string
-- @param dbdef array of column definitions; recognized keys per column:
--   id (this column is the record id), noninline (not stored inside the
--   record bytes), relation (filled from the relationship block),
--   unsigned, type ('string', 'float', ...), length (fixed array size).
-- @return a coroutine-wrapped iterator yielding one table per record,
--   keyed by dbdef column position. Records whose id is 0 are skipped;
--   copy-table duplicates are yielded right after their source record.
local function rows(content, dbdef)
  -- Partition dbdef: `fields` lists the columns physically present in
  -- each record, in storage order.
  local fields = {}
  local idin = nil -- position of the id column within `fields`
  local idout = nil -- position of the id column within dbdef
  local relout = nil -- position of the relation column within dbdef
  for i, f in ipairs(dbdef) do
    if f.id then
      assert(not idout)
      idout = i
    end
    if not f.noninline then
      table.insert(fields, {
        index = i,
        length = f.length,
        signed = not f.unsigned and f.type ~= 'float',
        string = f.type == 'string',
      })
      if f.id then
        idin = #fields
      end
    elseif f.relation then
      assert(not relout)
      relout = i
    end
  end
  assert(idout, 'no id field?')
  local cur = vstruct.cursor(content)
  -- Position the cursor at the common header: WDC4 has just the 4-byte
  -- magic before it, WDC5 a further 132-byte preamble (presumably a
  -- schema/build blob -- TODO confirm against format docs).
  local magic = content:sub(1, 4)
  if magic == 'WDC4' then
    cur:seek(nil, 4)
  elseif magic == 'WDC5' then
    cur:seek(nil, 136)
  else
    error('unexpected magic ' .. magic)
  end
  local h = header:read(cur)
  assert(h.section_count >= 0)
  if h.section_count == 0 then
    -- Easier to bail than rewrite the invariants for this case.
    return function() end
  end
  -- Check the header against what this reader supports: 24 bytes per
  -- field_storage_info entry, no offset map, and (unless the flag says
  -- to ignore it) an inline id column where the header claims.
  assert(h.total_field_count * 24 == h.field_storage_info_size)
  assert(h.flags.collectable == false)
  assert(h.flags.has_offset_map == false)
  assert(h.total_field_count == #fields)
  assert(h.flags.ignore_id_index or h.id_index == idin - 1)
  -- Read all section headers; the first section must be unencrypted
  -- (zero TACT key hash) and every later one encrypted.
  local shs = {}
  for i = 1, h.section_count do
    local sh = section_header:read(cur)
    assert(sh.id_list_size == 0 or sh.record_count * 4 == sh.id_list_size)
    assert((i == 1) == (sh.tact_key_hash == zerohash))
    table.insert(shs, sh)
  end
  cur:seek(nil, h.total_field_count * 4) -- ignore struct field_structure
  -- Read per-field storage descriptors and precompute each field's
  -- running byte offset into the common-data and pallet-data blocks.
  local fsis = {}
  local common_offsets = {}
  local common_offset = 0
  local pallet_offsets = {}
  local pallet_offset = 0
  for i = 1, h.total_field_count do
    local fsi = field_storage_info:read(cur)
    if fsi.storage_type == 0 then
      -- Type 0: plain value stored byte-aligned inside the record.
      assert(fsi.additional_data_size == 0)
      assert(fsi.field_offset_bits % 8 == 0)
      assert(fsi.field_offset_bits < h.bitpacked_data_offset * 8)
      assert(fsi.field_size_bits % 8 == 0)
      if fields[i].length then
        assert(fsi.field_size_bits == fields[i].length * 32)
        -- TODO support length > 1
        fsi.field_size_bits = 32
      else
        assert(fsi.field_size_bits <= 32)
      end
      assert(fsi.cx1 == 0)
      assert(fsi.cx2 == 0)
      assert(fsi.cx3 == 0)
    elseif fsi.storage_type == 2 then
      -- Type 2: value lives in the common-data block, keyed by record
      -- id; cx1 is the default for records with no entry (see below).
      assert(fsi.field_size_bits == 0)
      assert(fsi.cx2 == 0)
      assert(fsi.cx3 == 0)
    elseif fsi.storage_type == 3 then
      -- Type 3: bitpacked index into this field's pallet data.
      assert(fsi.field_size_bits > 0)
      assert(fsi.field_size_bits <= 32)
      assert(fsi.field_offset_bits >= h.bitpacked_data_offset * 8)
      assert(fsi.additional_data_size > 0)
      assert(fsi.cx3 == 0)
    elseif fsi.storage_type == 4 then
      -- Type 4: like type 3 but each pallet entry is an array of cx3
      -- values (only the first element is read below).
      assert(fsi.field_size_bits > 0)
      assert(fsi.field_size_bits <= 32)
      assert(fsi.field_offset_bits >= h.bitpacked_data_offset * 8)
      assert(fsi.additional_data_size > 0)
      assert(fsi.cx3 > 0)
    elseif fsi.storage_type == 1 or fsi.storage_type == 5 then
      -- Types 1/5: value bitpacked directly inside the record.
      assert(fsi.field_size_bits > 0)
      assert(fsi.field_size_bits <= 32)
      assert(fsi.field_offset_bits >= h.bitpacked_data_offset * 8)
      assert(fsi.additional_data_size == 0)
      assert(fsi.cx3 == 0)
    else
      error('unsupported storage type ' .. fsi.storage_type)
    end
    table.insert(fsis, fsi)
    table.insert(common_offsets, common_offset)
    table.insert(pallet_offsets, pallet_offset)
    if fsi.storage_type == 2 then
      common_offset = common_offset + fsi.additional_data_size
    elseif fsi.storage_type == 3 or fsi.storage_type == 4 then
      pallet_offset = pallet_offset + fsi.additional_data_size
    end
  end
  assert(common_offset == h.common_data_size)
  assert(pallet_offset == h.pallet_data_size)
  -- File layout after the descriptors: pallet data, common data, then
  -- per-encrypted-section id lists, then the sections themselves.
  local palletpos = cur.pos
  local commonpos = palletpos + h.pallet_data_size
  local encpos = commonpos + h.common_data_size
  local pos = encpos
  for _, sh in ipairs(shs) do
    if sh.tact_key_hash ~= zerohash then
      -- Skip this encrypted section's id list: u4 count, then count ids.
      local count = u4(content, pos)
      pos = pos + 4 + count * 4
    end
  end
  -- Walk the declared section sizes and verify they exactly tile the
  -- remainder of the file.
  for _, sh in ipairs(shs) do
    assert(pos == sh.file_offset)
    pos = pos + sh.record_count * h.record_size
    pos = pos + sh.string_table_size
    pos = pos + sh.id_list_size
    pos = pos + sh.copy_table_count * 8
    pos = pos + sh.offset_map_id_count * 6
    pos = pos + sh.relationship_data_size
    pos = pos + sh.offset_map_id_count * 4
  end
  assert(pos == #content)
  -- Preload common-data maps: commons[field][record_id] = value.
  -- Entries are (record id, value) u4 pairs.
  local commons = {}
  for i, fsi in ipairs(fsis) do
    local common = {}
    if fsi.storage_type == 2 then
      local start = commonpos + common_offsets[i]
      for c = start, start + fsi.additional_data_size - 1, 8 do
        local recordid = u4(content, c)
        local value = u4(content, c + 4)
        common[recordid] = value
      end
    end
    table.insert(commons, common)
  end
  -- Concatenate all per-section string tables into one string; string
  -- field values are field-relative offsets resolved against it below.
  local strings = {}
  for _, sh in ipairs(shs) do
    local off = sh.file_offset + sh.record_count * h.record_size
    table.insert(strings, content:sub(off + 1, off + sh.string_table_size))
  end
  strings = table.concat(strings)
  do
    -- xoffset = total record bytes of all sections AFTER this one;
    -- needed to translate a field-relative string offset into an index
    -- within the concatenated string table.
    local roffset = 0
    for i = #shs, 1, -1 do
      shs[i].xoffset = roffset
      roffset = roffset + shs[i].record_count * h.record_size
    end
  end
  return coroutine.wrap(function()
    for _, sh in ipairs(shs) do
      -- Section sub-block start positions, in file order.
      local rpos = sh.file_offset -- record data
      local spos = rpos + sh.record_count * h.record_size -- string table
      local ipos = spos + sh.string_table_size -- id list
      local cpos = ipos + sh.id_list_size -- copy table
      local fpos = cpos + sh.copy_table_count * 8 + sh.offset_map_id_count * 6 -- relationship data
      -- copytable[source_id] = list of new ids duplicating that record.
      local copytable = {}
      for _ = 1, sh.copy_table_count do
        local newid = u4(content, cpos)
        local copiedid = u4(content, cpos + 4)
        copytable[copiedid] = copytable[copiedid] or {}
        table.insert(copytable[copiedid], newid)
        cpos = cpos + 8
      end
      -- relmap[1-based record index] = foreign key for the relation
      -- column. Block layout: u4 count + 8 bytes header, then
      -- (foreign_key, record_index) u4 pairs.
      local relmap = {}
      if sh.relationship_data_size > 0 then
        local n = u4(content, fpos)
        assert(n == 0 or n * 8 + 12 == sh.relationship_data_size)
        for p = fpos + 12, fpos + sh.relationship_data_size - 1, 8 do
          local foreign_key = u4(content, p)
          local record_index = u4(content, p + 4)
          relmap[record_index + 1] = foreign_key
        end
      end
      for i = 1, sh.record_count do
        local t = {}
        for k, f in ipairs(fields) do
          local fsi = fsis[k]
          local fob = fsi.field_offset_bits
          local fsb = fsi.field_size_bits
          if fsi.storage_type == 0 then
            -- Byte-aligned inline value.
            local foffset = fob / 8
            local v = un[fsb / 8](content, rpos + foffset)
            if not f.string then
              t[f.index] = v
            elseif v == 0 then
              t[f.index] = ''
            else
              -- v is relative to the field's own file position; shift
              -- it into the concatenated string table via xoffset.
              local s = rpos + foffset + v - sh.xoffset - spos
              t[f.index] = z(strings, s)
            end
          elseif fsi.storage_type ~= 2 then
            -- Bitpacked value: read every covering byte (up to 5 for a
            -- 32-bit field at any bit offset), then shift and mask
            -- arithmetically (5.1-safe, no bit ops).
            local loff = div(fob, 8)
            local hoff = div(fob + fsb - 1, 8)
            local v = un[hoff - loff + 1](content, rpos + loff)
            local vv = div(v, 2 ^ (fob % 8)) % (2 ^ fsb)
            if fsi.storage_type == 1 or fsi.storage_type == 5 then
              -- NOTE(review): no sign extension here even when f.signed,
              -- unlike the type-3 branch below -- verify intentional.
              t[f.index] = vv
            elseif fsi.storage_type == 3 then
              local p = u4(content, palletpos + pallet_offsets[k] + vv * 4)
              -- Reinterpret the unsigned u4 as two's-complement.
              if f.signed and p >= 2 ^ 31 then
                p = p - 2 ^ 32
              end
              t[f.index] = p
            elseif fsi.storage_type == 4 then
              -- TODO support pallet arrays
              t[f.index] = u4(content, palletpos + pallet_offsets[k] + vv * 4 * fsi.cx3)
            else
              error('internal error')
            end
          end
        end
        if relout then
          t[relout] = relmap[i]
        end
        -- Non-inline id column: pull it from the section's id list.
        if idout and not idin then
          assert(sh.id_list_size > 0)
          t[idout] = u4(content, ipos)
          ipos = ipos + 4
        end
        -- Records with id 0 are dropped entirely.
        if t[idout] ~= 0 then
          -- Fill common-data fields now that the record id is known;
          -- cx1 is the fallback when no per-record entry exists.
          for k, f in ipairs(fields) do
            local fsi = fsis[k]
            if fsi.storage_type == 2 then
              t[f.index] = commons[k][t[idout]] or fsi.cx1
            end
          end
          -- Materialize copy-table duplicates of this record (shallow
          -- copies with only the id replaced).
          local copies = {}
          for _, newid in ipairs(copytable[t[idout]] or {}) do
            local tt = {}
            for k = 1, #dbdef do
              tt[k] = t[k]
            end
            tt[idout] = newid
            table.insert(copies, tt)
          end
          coroutine.yield(t)
          for k = #copies, 1, -1 do
            coroutine.yield(copies[k])
          end
        end
        rpos = rpos + h.record_size
      end
    end
  end)
end
-- Module exports: only the record iterator is public.
return {
  rows = rows,
}