Mirror of https://gitlab.com/wgp/dougal/software.git (synced 2025-12-06 09:57:09 +00:00)

Compare commits: 62-service...v2025.33.4 (1,112 commits)

(The +/− markers in the diffs below were dropped by the page capture; they have been re-inferred from the hunk headers and may be imperfect in places.)
[Commit table omitted: the capture preserved only 1,112 bare SHA-1 hashes; the Author and Date columns were empty.]
.gitignore (vendored, 3 lines added)

```diff
@@ -10,3 +10,6 @@ lib/www/client/source/dist/
 lib/www/client/dist/
 etc/surveys/*.yaml
 !etc/surveys/_*.yaml
+etc/ssl/*
+etc/config.yaml
+var/*
```
bin/check_mounts_present.py (new executable file, 27 lines)

```diff
@@ -0,0 +1,27 @@
+#!/usr/bin/python3
+
+"""
+Check if any of the directories provided in the imports.mounts configuration
+section are empty.
+
+Returns 0 if all arguments are non-empty, 1 otherwise. It stops at the first
+empty directory.
+"""
+
+import os
+import configuration
+
+cfg = configuration.read()
+
+if cfg and "imports" in cfg and "mounts" in cfg["imports"]:
+
+    mounts = cfg["imports"]["mounts"]
+    for item in mounts:
+        with os.scandir(item) as contents:
+            if not any(contents):
+                exit(1)
+
+else:
+    print("No mounts in configuration")
+
+exit(0)
```
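The script only inspects one key of the parsed configuration. A minimal sketch of what `configuration.read()` would have to return for the check to do anything — the mount points here are hypothetical, and the real values live in `etc/config.yaml`:

```python
# Hypothetical parsed configuration; only the key that
# check_mounts_present.py looks at is shown.
cfg = {
    "imports": {
        "mounts": [
            "/mnt/nav",       # directories that must be mounted and non-empty
            "/mnt/gunlogs",
        ]
    }
}
```

An empty directory here usually means a network share failed to mount, so exiting non-zero lets a wrapper or monitor refuse to run the importers against a hollow tree.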
Configuration module (the file header was lost in the capture; these hunks match the `configuration` module that the `bin/` scripts import):

```diff
@@ -1,4 +1,5 @@
 import os
+import pathlib
 from glob import glob
 from yaml import full_load as _load
 
@@ -11,6 +12,18 @@ surveys should be under $HOME/etc/surveys/*.yaml. In both cases,
 $HOME is the home directory of the user running this script.
 """
 
+def is_relative_to(it, other):
+    """
+    is_relative_to() is not present before Python 3.9, so we
+    need this kludge to get Dougal to run on OpenSUSE 15.4
+    """
+
+    if "is_relative_to" in dir(it):
+        return it.is_relative_to(other)
+
+    return str(it.absolute()).startswith(str(other.absolute()))
+
+
 prefix = os.environ.get("DOUGAL_ROOT", os.environ.get("HOME", ".")+"/software")
 
 DOUGAL_ROOT = os.environ.get("DOUGAL_ROOT", os.environ.get("HOME", ".")+"/software")
@@ -54,6 +67,10 @@ def files (globspec = None, include_archived = False):
     quickly and temporarily “disabling” a survey configuration by renaming
     the relevant file.
     """
 
+    print("This method is obsolete")
+    return
+
     tuples = []
 
     if globspec is None:
@@ -87,3 +104,73 @@ def rxflags (flagstr):
     for flag in flagstr:
         flags |= cases.get(flag, 0)
     return flags
+
+def translate_path (file):
+    """
+    Translate a path from a Dougal import directory to an actual
+    physical path on disk.
+
+    Any user files accessible by Dougal must be under a path prefixed
+    by `(config.yaml).imports.paths`. The value of `imports.paths` may
+    be either a string, in which case this represents the prefix under
+    which all Dougal data resides, or a dictionary where the keys are
+    logical paths and their values the corresponding physical path.
+    """
+    cfg = read()
+    root = pathlib.Path(DOUGAL_ROOT)
+    filepath = pathlib.Path(file).resolve()
+    import_paths = cfg["imports"]["paths"]
+
+    if filepath.is_absolute():
+        if type(import_paths) == str:
+            # Substitute the root for the real physical path
+            # NOTE: `root` deals with import_paths not being absolute
+            prefix = root.joinpath(pathlib.Path(import_paths)).resolve()
+            return str(pathlib.Path(prefix).joinpath(*filepath.parts[2:]))
+        else:
+            # Look for a match on the second path element
+            if filepath.parts[1] in import_paths:
+                # NOTE: `root` deals with import_paths[…] not being absolute
+                prefix = root.joinpath(import_paths[filepath.parts[1]])
+                return str(pathlib.Path(prefix).joinpath(*filepath.parts[2:]))
+            else:
+                # This path is invalid
+                raise TypeError("invalid path or file: {0!r}".format(filepath))
+    else:
+        # A relative filepath is always resolved relative to the logical root
+        root = pathlib.Path("/")
+        return translate_path(root.joinpath(filepath))
+
+def untranslate_path (file):
+    """
+    Attempt to convert a physical path into a logical one.
+    See `translate_path()` above for details.
+    """
+    cfg = read()
+    dougal_root = pathlib.Path(DOUGAL_ROOT)
+    filepath = pathlib.Path(file).resolve()
+    import_paths = cfg["imports"]["paths"]
+    physical_root = pathlib.Path("/")
+
+    if filepath.is_absolute():
+        if type(import_paths) == str:
+            if is_relative_to(filepath, import_paths):
+                physical_root = pathlib.Path("/")
+                physical_prefix = pathlib.Path(import_paths)
+                return str(physical_root.joinpath(filepath.relative_to(physical_prefix)))
+            else:
+                raise TypeError("invalid path or file: {0!r}".format(filepath))
+        else:
+            for key, value in import_paths.items():
+                value = dougal_root.joinpath(value)
+                physical_prefix = pathlib.Path(value)
+                if is_relative_to(filepath, physical_prefix):
+                    logical_prefix = physical_root.joinpath(pathlib.Path(key)).resolve()
+                    return str(logical_prefix.joinpath(filepath.relative_to(physical_prefix)))
+
+            # If we got here with no matches, this is not a valid
+            # Dougal data path
+            raise TypeError("invalid path or file: {0!r}".format(filepath))
+    else:
+        # A relative filepath is always resolved relative to DOUGAL_ROOT
+        return untranslate_path(dougal_root.joinpath(filepath))
```
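The new path-translation pair is the heart of this changeset: the database and the configuration talk in logical paths, and only the importers touch the disk. A sketch of the intended round trip under a hypothetical `imports.paths` dictionary:

```python
# Assuming config.yaml parses to imports.paths == {"nav": "/mnt/vessel/nav"}
# (hypothetical; DOUGAL_ROOT does not matter here because the prefix is
# absolute):
#
#   translate_path("/nav/seq/0042.p111")
#       -> "/mnt/vessel/nav/seq/0042.p111"   # logical -> physical
#
#   untranslate_path("/mnt/vessel/nav/seq/0042.p111")
#       -> "/nav/seq/0042.p111"              # physical -> logical
#
# The import scripts below glob the physical tree but store and look up the
# logical form, so data can be moved between disks by editing config.yaml
# instead of rewriting database rows.
```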
Project setup script (file header lost in the capture; a `#`-commented header block, with the example EPSG code corrected):

```diff
@@ -10,7 +10,7 @@
 # be known to the database.
 # * PROJECT_NAME is a more descriptive name for human consumption.
 # * EPSG_CODE is the EPSG code identifying the CRS for the grid data in the
-#   navigation files, e.g., 32031.
+#   navigation files, e.g., 23031.
 #
 # In addition to this, certain other parameters may be controlled via
 # environment variables:
```
bin/daily_tasks.py (new executable file, 26 lines)

```diff
@@ -0,0 +1,26 @@
+#!/usr/bin/python3
+
+"""
+Do daily housekeeping on the database.
+
+This is meant to run shortly after midnight every day.
+"""
+
+import configuration
+from datastore import Datastore
+
+if __name__ == '__main__':
+
+    print("Connecting to database")
+    db = Datastore()
+    surveys = db.surveys()
+
+    print("Reading surveys")
+    for survey in surveys:
+        print(f'Survey: {survey["id"]} ({survey["schema"]})')
+        db.set_survey(survey["schema"])
+
+        print("Daily tasks")
+        db.run_daily_tasks()
+
+    print("Done")
```
bin/datastore.py (274 lines changed)

```diff
@@ -4,6 +4,7 @@ import psycopg2
 import configuration
 import preplots
 import p111
+from hashlib import md5 # Because it's good enough
 
 """
 Interface to the PostgreSQL database.
```
```diff
@@ -11,13 +12,16 @@ Interface to the PostgreSQL database.
 
 def file_hash(file):
     """
-    Calculate a file hash based on its size, inode, modification and creation times.
+    Calculate a file hash based on its name, size, modification and creation times.
 
     The hash is used to uniquely identify files in the database and detect if they
     have changed.
     """
+    h = md5()
+    h.update(file.encode())
+    name_digest = h.hexdigest()[:16]
    st = os.stat(file)
-    return ":".join([str(v) for v in [st.st_size, st.st_mtime, st.st_ctime, st.st_ino]])
+    return ":".join([str(v) for v in [st.st_size, st.st_mtime, st.st_ctime, name_digest]])
 
 class Datastore:
     """
```
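The hash change swaps the inode for a digest of the file name, presumably so that re-mounting a share or restoring a file from backup does not make every file look new — `st_ino` is not stable across filesystems, while the path string is. A minimal sketch of the new behaviour, with illustrative values:

```python
import os
from hashlib import md5

def file_hash(file):
    # As in the hunk above: size, mtime, ctime, and the first 16 hex
    # characters of the MD5 of the path string -- no inode.
    h = md5()
    h.update(file.encode())
    st = os.stat(file)
    return ":".join(str(v) for v in
                    (st.st_size, st.st_mtime, st.st_ctime, h.hexdigest()[:16]))

# e.g. "10485760:1719834000.0:1719834000.0:0f8cda1b4c2e9a33"  (illustrative)
# A byte-identical file restored to the same path on a new volume keeps its
# hash; a rename changes it -- the reverse of the old inode-based scheme.
```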
```diff
@@ -48,7 +52,7 @@ class Datastore:
 
         self.conn = psycopg2.connect(configuration.read()["db"]["connection_string"], **opts)
 
-    def set_autocommit(value = True):
+    def set_autocommit(self, value = True):
         """
         Enable or disable autocommit.
 
@@ -91,7 +95,7 @@ class Datastore:
         cursor.execute(qry, (filepath,))
         results = cursor.fetchall()
         if len(results):
-            return (filepath, file_hash(filepath)) in results
+            return (filepath, file_hash(configuration.translate_path(filepath))) in results
 
 
     def add_file(self, path, cursor = None):
```
```diff
@@ -103,7 +107,8 @@ class Datastore:
         else:
             cur = cursor
 
-        hash = file_hash(path)
+        realpath = configuration.translate_path(path)
+        hash = file_hash(realpath)
         qry = "CALL add_file(%s, %s);"
         cur.execute(qry, (path, hash))
         if cursor is None:
@@ -172,7 +177,7 @@ class Datastore:
         else:
             cur = cursor
 
-        hash = file_hash(path)
+        hash = file_hash(configuration.translate_path(path))
         qry = """
             UPDATE raw_lines rl
             SET ntbp = %s
```
```diff
@@ -251,6 +256,78 @@ class Datastore:
 
         self.maybe_commit()
 
+
+    def save_preplot_line_info(self, lines, filepath, filedata = None):
+        """
+        Save preplot line information
+
+        Arguments:
+
+        lines (iterable): should be a collection of lines returned from
+        one of the line info reading functions (see preplots.py).
+
+        filepath (string): the full path to the preplot file from where the lines
+        have been read. It will be added to the survey's `file` table so that
+        it can be monitored for changes.
+        """
+
+        with self.conn.cursor() as cursor:
+            cursor.execute("BEGIN;")
+
+            # Check which preplot lines we actually have already imported,
+            # as the line info file may contain extra lines.
+
+            qry = """
+                SELECT line, class
+                FROM preplot_lines
+                ORDER BY line, class;
+            """
+            cursor.execute(qry)
+            preplot_lines = cursor.fetchall()
+
+            hash = self.add_file(filepath, cursor)
+            count = 0
+            for line in lines:
+                count += 1
+
+                if not (line["sail_line"], "V") in preplot_lines:
+                    print(f"\u001b[2KSkipping line {count} / {len(lines)}", end="\n", flush=True)
+                    continue
+
+                print(f"\u001b[2KSaving line {count} / {len(lines)} ", end="\n", flush=True)
+
+                sail_line = line["sail_line"]
+                incr = line.get("incr", True)
+                ntba = line.get("ntba", False)
+                remarks = line.get("remarks", None)
+                meta = json.dumps(line.get("meta", {}))
+                source_lines = line.get("source_line", [])
+
+                for source_line in source_lines:
+                    qry = """
+                        INSERT INTO preplot_saillines AS ps
+                            (sailline, line, sailline_class, line_class, incr, ntba, remarks, meta, hash)
+                        VALUES
+                            (%s, %s, 'V', 'S', %s, %s, %s, %s, %s)
+                        ON CONFLICT (sailline, sailline_class, line, line_class, incr) DO UPDATE
+                        SET
+                            incr = EXCLUDED.incr,
+                            ntba = EXCLUDED.ntba,
+                            remarks = COALESCE(EXCLUDED.remarks, ps.remarks),
+                            meta = ps.meta || EXCLUDED.meta,
+                            hash = EXCLUDED.hash;
+                    """
+
+                    # NOTE Consider using cursor.executemany() instead. Then again,
+                    # we're only expecting a few hundred lines at most.
+                    cursor.execute(qry, (sail_line, source_line, incr, ntba, remarks, meta, hash))
+
+            if filedata is not None:
+                self.save_file_data(filepath, json.dumps(filedata), cursor)
+
+            self.maybe_commit()
+
+
     def save_raw_p190(self, records, fileinfo, filepath, epsg = 0, filedata = None, ntbp = False):
         """
         Save raw P1 data.
```
```diff
@@ -390,20 +467,40 @@ class Datastore:
 
         with self.conn.cursor() as cursor:
             cursor.execute("BEGIN;")
 
             hash = self.add_file(filepath, cursor)
 
             if not records or len(records) == 0:
                 print("File has no records (or none have been detected)")
+                # We add the file to the database anyway to signal that we have
+                # actually seen it.
+                self.maybe_commit()
                 return
 
             incr = p111.point_number(records[0]) <= p111.point_number(records[-1])
 
             # Start by deleting any online data we may have for this sequence
             self.del_hash("*online*", cursor)
 
             qry = """
-                INSERT INTO raw_lines (sequence, line, remarks, ntbp, incr)
-                VALUES (%s, %s, '', %s, %s)
-                ON CONFLICT DO NOTHING;
+                INSERT INTO raw_lines (sequence, line, remarks, ntbp, incr, meta)
+                VALUES (%s, %s, '', %s, %s, %s)
+                ON CONFLICT (sequence) DO UPDATE SET
+                    line = EXCLUDED.line,
+                    ntbp = EXCLUDED.ntbp,
+                    incr = EXCLUDED.incr,
+                    meta = EXCLUDED.meta;
             """
 
-            cursor.execute(qry, (fileinfo["sequence"], fileinfo["line"], ntbp, incr))
+            cursor.execute(qry, (fileinfo["sequence"], fileinfo["line"], ntbp, incr, json.dumps(fileinfo["meta"])))
+
+            qry = """
+                UPDATE raw_lines
+                SET meta = meta || %s
+                WHERE sequence = %s;
+            """
+
+            cursor.execute(qry, (json.dumps(fileinfo["meta"]), fileinfo["sequence"]))
 
             qry = """
                 INSERT INTO raw_lines_files (sequence, hash)
```
```diff
@@ -436,16 +533,26 @@ class Datastore:
 
         with self.conn.cursor() as cursor:
             cursor.execute("BEGIN;")
 
             hash = self.add_file(filepath, cursor)
 
             qry = """
-                INSERT INTO final_lines (sequence, line, remarks)
-                VALUES (%s, %s, '')
-                ON CONFLICT DO NOTHING;
+                INSERT INTO final_lines (sequence, line, remarks, meta)
+                VALUES (%s, %s, '', %s)
+                ON CONFLICT (sequence) DO UPDATE SET
+                    line = EXCLUDED.line,
+                    meta = EXCLUDED.meta;
             """
 
-            cursor.execute(qry, (fileinfo["sequence"], fileinfo["line"]))
+            cursor.execute(qry, (fileinfo["sequence"], fileinfo["line"], json.dumps(fileinfo["meta"])))
+
+            qry = """
+                UPDATE raw_lines
+                SET meta = meta || %s
+                WHERE sequence = %s;
+            """
+
+            cursor.execute(qry, (json.dumps(fileinfo["meta"]), fileinfo["sequence"]))
 
             qry = """
                 INSERT INTO final_lines_files (sequence, hash)
```
```diff
@@ -472,6 +579,8 @@ class Datastore:
             if filedata is not None:
                 self.save_file_data(filepath, json.dumps(filedata), cursor)
 
+            cursor.execute("CALL final_line_post_import(%s);", (fileinfo["sequence"],))
+
             self.maybe_commit()
 
     def save_raw_smsrc (self, records, fileinfo, filepath, filedata = None):
```
```diff
@@ -506,7 +615,7 @@ class Datastore:
 
         qry = """
             UPDATE raw_shots
-            SET meta = jsonb_set(meta, '{smsrc}', %s::jsonb, true)
+            SET meta = jsonb_set(meta, '{smsrc}', %s::jsonb, true) - 'qc'
             WHERE sequence = %s AND point = %s;
         """
 
```
```diff
@@ -552,7 +661,68 @@ class Datastore:
             self.maybe_commit()
             # We do not commit if we've been passed a cursor, instead
             # we assume that we are in the middle of a transaction
 
+    def get_file_data(self, path, cursor = None):
+        """
+        Retrieve arbitrary data associated with a file.
+        """
+
+        if cursor is None:
+            cur = self.conn.cursor()
+        else:
+            cur = cursor
+
+        realpath = configuration.translate_path(path)
+        hash = file_hash(realpath)
+
+        qry = """
+            SELECT data
+            FROM file_data
+            WHERE hash = %s;
+        """
+
+        cur.execute(qry, (hash,))
+        res = cur.fetchone()
+
+        if cursor is None:
+            self.maybe_commit()
+            # We do not commit if we've been passed a cursor, instead
+            # we assume that we are in the middle of a transaction
+        return res[0]
+
+    def surveys (self, include_archived = False):
+        """
+        Return list of survey definitions.
+        """
+
+        if self.conn is None:
+            self.connect()
+
+        if include_archived:
+            qry = """
+                SELECT meta, schema
+                FROM public.projects;
+            """
+        else:
+            qry = """
+                SELECT meta, schema
+                FROM public.projects
+                WHERE NOT (meta->'archived')::boolean IS true
+            """
+
+        with self.conn:
+            with self.conn.cursor() as cursor:
+
+                cursor.execute(qry)
+                results = cursor.fetchall()
+                surveys = []
+                for r in results:
+                    if r[0]:
+                        r[0]['schema'] = r[1]
+                        surveys.append(r[0])
+                return surveys
+
+
     # TODO Does this need tweaking on account of #246?
     def apply_survey_configuration(self, cursor = None):
         if cursor is None:
             cur = self.conn.cursor()
```
```diff
@@ -631,3 +801,73 @@ class Datastore:
             self.maybe_commit()
             # We do not commit if we've been passed a cursor, instead
             # we assume that we are in the middle of a transaction
+
+    def del_sequence_final(self, sequence, cursor = None):
+        """
+        Remove final data for a sequence.
+        """
+
+        if cursor is None:
+            cur = self.conn.cursor()
+        else:
+            cur = cursor
+
+        qry = "DELETE FROM files WHERE hash = (SELECT hash FROM final_lines_files WHERE sequence = %s);"
+        cur.execute(qry, (sequence,))
+        if cursor is None:
+            self.maybe_commit()
+            # We do not commit if we've been passed a cursor, instead
+            # we assume that we are in the middle of a transaction
+
+    def adjust_planner(self, cursor = None):
+        """
+        Adjust estimated times on the planner
+        """
+        if cursor is None:
+            cur = self.conn.cursor()
+        else:
+            cur = cursor
+
+        qry = "CALL adjust_planner();"
+        cur.execute(qry)
+        if cursor is None:
+            self.maybe_commit()
+            # We do not commit if we've been passed a cursor, instead
+            # we assume that we are in the middle of a transaction
+
+    def housekeep_event_log(self, cursor = None):
+        """
+        Call housekeeping actions on the event log
+        """
+        if cursor is None:
+            cur = self.conn.cursor()
+        else:
+            cur = cursor
+
+        qry = "CALL augment_event_data();"
+        cur.execute(qry)
+
+        qry = "CALL scan_placeholders();"
+        cur.execute(qry)
+
+        if cursor is None:
+            self.maybe_commit()
+            # We do not commit if we've been passed a cursor, instead
+            # we assume that we are in the middle of a transaction
+
+    def run_daily_tasks(self, cursor = None):
+        """
+        Run once-a-day tasks
+        """
+        if cursor is None:
+            cur = self.conn.cursor()
+        else:
+            cur = cursor
+
+        qry = "CALL log_midnight_shots();"
+        cur.execute(qry)
+
+        if cursor is None:
+            self.maybe_commit()
+            # We do not commit if we've been passed a cursor, instead
+            # we assume that we are in the middle of a transaction
```
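All of the new Datastore helpers repeat one transaction convention: when the caller passes a cursor, the method runs inside the caller's transaction and leaves committing to them; when it does not, the method opens its own cursor and commits. A distilled sketch of the pattern — the helper name below is a placeholder, not part of the diff:

```python
def _call(self, qry, params=(), cursor=None):
    # Hypothetical condensation of adjust_planner(), housekeep_event_log(),
    # run_daily_tasks(), del_sequence_final(), ...
    cur = self.conn.cursor() if cursor is None else cursor
    cur.execute(qry, params)
    if cursor is None:
        # We own the transaction, so commit (subject to autocommit).
        self.maybe_commit()
    # Otherwise the caller is mid-transaction and commits itself.
```

This is what lets `save_preplot_line_info()` hand its cursor to `add_file()` and `save_file_data()` and still wrap the whole import in a single `BEGIN`/commit.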
bin/delimited.py (new file, 163 lines)

```diff
@@ -0,0 +1,163 @@
+#!/usr/bin/python3
+
+"""
+Delimited record importing functions.
+"""
+
+import csv
+import builtins
+
+def to_bool (v):
+    try:
+        return bool(int(v))
+    except ValueError:
+        if type(v) == str:
+            return v.strip().lower().startswith("t")
+        return False
+
+transform = {
+    "int": lambda v: builtins.int(float(v)),
+    "float": float,
+    "string": str,
+    "bool": to_bool
+}
+
+def cast_values (row, fields):
+
+    def enum_for (key):
+        field = fields.get(key, {})
+        def enum (val):
+            if "enum" in field:
+                ret_val = field.get("default", val)
+                enums = field.get("enum", [])
+                for enum_key in enums:
+                    if enum_key == val:
+                        ret_val = enums[enum_key]
+                return ret_val
+            return val
+        return enum
+
+    # Get rid of any unwanted data
+    if None in row:
+        del(row[None])
+
+    for key in row:
+
+        val = row[key]
+        enum = enum_for(key)
+        transformer = transform.get(fields.get(key, {}).get("type"), str)
+
+        if type(val) == list:
+            for i, v in enumerate(val):
+                row[key][i] = transformer(enum(v))
+        elif type(val) == dict:
+            continue
+        else:
+            row[key] = transformer(enum(val))
+    return row
+
+def build_fieldnames (spec): #(arr, key, val):
+    fieldnames = []
+
+    if "fields" in spec:
+        for key in spec["fields"]:
+            index = spec["fields"][key]["column"]
+            try:
+                fieldnames[index] = key
+            except IndexError:
+                assert index >= 0
+                fieldnames.extend(((index + 1) - len(fieldnames)) * [None])
+                fieldnames[index] = key
+
+    return fieldnames
+
+
+def from_file_delimited (path, spec):
+
+    fieldnames = build_fieldnames(spec)
+    fields = spec.get("fields", [])
+    delimiter = spec.get("delimiter", ",")
+    firstRow = spec.get("firstRow", 0)
+    headerRow = spec.get("headerRow", False)
+    if headerRow:
+        firstRow += 1
+
+    records = []
+    with open(path, "r", errors="ignore") as fd:
+
+        if spec.get("type") == "x-sl+csv":
+            fieldnames = None # Pick from header row
+            firstRow = 0
+            reader = csv.DictReader(fd, delimiter=delimiter)
+        else:
+            reader = csv.DictReader(fd, fieldnames=fieldnames, delimiter=delimiter)
+
+        row = 0
+        for line in reader:
+            skip = False
+
+            if row < firstRow:
+                skip = True
+
+            if not skip:
+                records.append(cast_values(dict(line), fields))
+
+            row += 1
+
+    return records
+
+
+def remap (line, headers):
+    row = dict()
+    for i, key in enumerate(headers):
+        if "." in key[1:-1]:
+            # This is an object
+            k, attr = key.split(".")
+            if not k in row:
+                row[k] = dict()
+            row[k][attr] = line[i]
+        elif key in row:
+            if type(row[key]) == list:
+                row[key].append(line[i])
+            else:
+                row[key] = [ row[key], line[i] ]
+        else:
+            row[key] = line[i]
+    return row
+
+def from_file_saillines (path, spec):
+
+    fields = {
+        "sail_line": { "type": "int" },
+        "source_line": { "type": "int" },
+        "incr": { "type": "bool" },
+        "ntba": { "type": "bool" }
+    }
+
+    # fields = spec.get("fields", sl_fields)
+    delimiter = spec.get("delimiter", ",")
+    firstRow = spec.get("firstRow", 0)
+
+    records = []
+    with open(path, "r", errors="ignore") as fd:
+        row = 0
+        reader = csv.reader(fd, delimiter=delimiter)
+        while row < firstRow:
+            next(reader)
+            row += 1
+        headers = [ h.strip() for h in next(reader) if len(h.strip()) ]
+
+        for line in reader:
+            records.append(cast_values(remap(line, headers), fields))
+
+    return records
+
+
+def from_file_p111 (path, spec):
+    pass
+
+def from_file (path, spec):
+    if spec.get("type") == "x-sl+csv":
+        return from_file_saillines(path, spec)
+    else:
+        return from_file_delimited(path, spec)
```
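A usage sketch, under an assumed spec (the file contents and field layout are hypothetical; `import delimited` assumes the script runs from `bin/` like the other importers): `fields` maps output keys to zero-based `column` indices, with optional `type` casting applied by `cast_values()`.

```python
import delimited

# Hypothetical spec for a file whose rows look like "1001,5023,1":
spec = {
    "delimiter": ",",
    "firstRow": 0,
    "fields": {
        "sail_line":   { "column": 0, "type": "int" },
        "source_line": { "column": 1, "type": "int" },
        "ntba":        { "column": 2, "type": "bool" },
    },
}

records = delimited.from_file("lines.csv", spec)
# -> [{"sail_line": 1001, "source_line": 5023, "ntba": True}, ...]
```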
bin/fwr.py (new file, 128 lines)

```diff
@@ -0,0 +1,128 @@
+#!/usr/bin/python3
+
+"""
+Fixed width record importing functions.
+"""
+
+import builtins
+
+def to_bool (v):
+    try:
+        return bool(int(v))
+    except ValueError:
+        if type(v) == str:
+            return v.strip().lower().startswith("t")
+        return False
+
+transform = {
+    "int": lambda v: builtins.int(float(v)),
+    "float": float,
+    "string": str,
+    "str": str,
+    "bool": to_bool
+}
+
+def parse_line (line, fields, fixed = None):
+    # print("parse_line", line, fields, fixed)
+    data = dict()
+
+    if fixed:
+        for value in fixed:
+            start = value["offset"]
+            end = start + len(value["text"])
+            text = line[start:end]
+            if text != value["text"]:
+                return f"Expected text `{value['text']}` at position {start} but found `{text}` instead."
+
+    for key in fields:
+        spec = fields[key]
+        transformer = transform[spec.get("type", "str")]
+        pos_from = spec["offset"]
+        pos_to = pos_from + spec["length"]
+        text = line[pos_from:pos_to]
+        value = transformer(text)
+        if "enum" in spec:
+            if "default" in spec:
+                value = spec["default"]
+            for enum_key in spec["enum"]:
+                if enum_key == text:
+                    enum_value = transformer(spec["enum"][enum_key])
+                    value = enum_value
+                    break
+
+        data[key] = value
+
+    # print("parse_line data =", data)
+    return data
+
+
+specfields = {
+    "sps1": {
+        "line_name": { "offset": 1, "length": 16, "type": "int" },
+        "point_number": { "offset": 17, "length": 8, "type": "int" },
+        "easting": { "offset": 46, "length": 9, "type": "float" },
+        "northing": { "offset": 55, "length": 10, "type": "float" }
+    },
+    "sps21": {
+        "line_name": { "offset": 1, "length": 7, "type": "int" },
+        "point_number": { "offset": 11, "length": 7, "type": "int" },
+        "easting": { "offset": 46, "length": 9, "type": "float" },
+        "northing": { "offset": 55, "length": 10, "type": "float" }
+    },
+    "p190": {
+        "line_name": { "offset": 1, "length": 12, "type": "int" },
+        "point_number": { "offset": 19, "length": 6, "type": "int" },
+        "easting": { "offset": 46, "length": 9, "type": "float" },
+        "northing": { "offset": 55, "length": 9, "type": "float" }
+    },
+}
+
+def from_file(path, spec):
+
+    # If spec.fields is not present, deduce it from spec.type ("sps1", "sps21", "p190", etc.)
+    if "fields" in spec:
+        fields = spec["fields"]
+    elif "type" in spec and spec["type"] in specfields:
+        fields = specfields[spec["type"]]
+    else:
+        # TODO: Should default to looking for spec.format and doing a legacy import on it
+        return "Neither 'type' nor 'fields' given. I don't know how to import this fixed-width dataset."
+
+    firstRow = spec.get("firstRow", 0)
+
+    skipStart = [] # Skip lines starting with any of these values
+    skipMatch = [] # Skip lines matching any of these values
+
+    if "type" in spec:
+        if spec["type"] == "sps1" or spec["type"] == "sps21" or spec["type"] == "p190":
+            skipStart = "H"
+            skipMatch = "EOF"
+
+    records = []
+    with open(path, "r", errors="ignore") as fd:
+        row = 0
+        line = fd.readline()
+
+        while line:
+            skip = False
+
+            if row < firstRow:
+                skip = True
+
+            if not skip:
+                for v in skipStart:
+                    if line.startswith(v):
+                        skip = True
+                        break
+                for v in skipMatch:
+                    if line == v:
+                        skip = True
+                        break
+
+            if not skip:
+                records.append(parse_line(line, fields))
+
+            row += 1
+            line = fd.readline()
+
+    return records
```
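`parse_line()` is also what the final-P1/11 importer further down uses to pick sequence and line numbers out of file names via `lineNameInfo`: it returns a dict on success and an error *string* when a `fixed` literal does not match, which is exactly what the importer's `type(parsed_line) == str` check relies on. A sketch with hypothetical offsets:

```python
import fwr

# Hypothetical lineNameInfo for names like "SEQ0042-LINE5023.p111":
fields = {
    "sequence": { "offset": 3,  "length": 4, "type": "int" },
    "line":     { "offset": 12, "length": 4, "type": "int" },
}
fixed = [ { "offset": 0, "text": "SEQ" } ]   # sanity-check literal text

info = fwr.parse_line("SEQ0042-LINE5023.p111", fields, fixed)
# -> {"sequence": 42, "line": 5023}
```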
bin/housekeep_database.py (new executable file, 26 lines)

```diff
@@ -0,0 +1,26 @@
+#!/usr/bin/python3
+
+"""
+Do housekeeping actions on the database.
+"""
+
+import configuration
+from datastore import Datastore
+
+if __name__ == '__main__':
+
+    print("Connecting to database")
+    db = Datastore()
+    surveys = db.surveys()
+
+    print("Reading surveys")
+    for survey in surveys:
+        print(f'Survey: {survey["id"]} ({survey["schema"]})')
+        db.set_survey(survey["schema"])
+
+        print("Planner adjustment")
+        db.adjust_planner()
+        print("Event log housekeeping")
+        db.housekeep_event_log()
+
+    print("Done")
```
QC script (file header lost in the capture):

```diff
@@ -59,7 +59,7 @@ def qc_data (cursor, prefix):
     else:
         print("No QC data found");
         return
 
-
+    #print("QC", qc)
     index = 0
     for item in qc["results"]:
```
Seis-data export script (file header lost in the capture; the changed lines in these hunks appear to be blank, i.e. a whitespace-only cleanup):

```diff
@@ -39,7 +39,7 @@ def seis_data (survey):
     if not pathlib.Path(pathPrefix).exists():
         print(pathPrefix)
         raise ValueError("Export path does not exist")
-
+
     print(f"Requesting sequences for {survey['id']}")
     url = f"http://localhost:3000/api/project/{survey['id']}/sequence"
     r = requests.get(url)
@@ -47,12 +47,12 @@ def seis_data (survey):
     for sequence in r.json():
         if sequence['status'] not in ["final", "ntbp"]:
             continue
-
+
         filename = pathlib.Path(pathPrefix, "sequence{:0>3d}.json".format(sequence['sequence']))
         if filename.exists():
            print(f"Skipping export for sequence {sequence['sequence']} – file already exists")
            continue
-
+
        print(f"Processing sequence {sequence['sequence']}")
        url = f"http://localhost:3000/api/project/{survey['id']}/event?sequence={sequence['sequence']}&missing=t"
        headers = { "Accept": "application/vnd.seis+json" }
```
Final P1/11 import script (file header lost in the capture; note the new `import fwr` and the PENDING handling):

```diff
@@ -12,18 +12,51 @@ import os
 import sys
 import pathlib
 import re
+import time
 import configuration
 import p111
+import fwr
 from datastore import Datastore
 
+def add_pending_remark(db, sequence):
+    text = '<!-- @@DGL:PENDING@@ --><h4 style="color:red;cursor:help;" title="Edit the sequence file or directory name to import final data">Marked as <code>PENDING</code>.</h4><!-- @@/DGL:PENDING@@ -->\n'
+
+    with db.conn.cursor() as cursor:
+        qry = "SELECT remarks FROM raw_lines WHERE sequence = %s;"
+        cursor.execute(qry, (sequence,))
+        remarks = cursor.fetchone()[0]
+        rx = re.compile("^(<!-- @@DGL:PENDING@@ -->.*<!-- @@/DGL:PENDING@@ -->\n)")
+        m = rx.match(remarks)
+        if m is None:
+            remarks = text + remarks
+            qry = "UPDATE raw_lines SET remarks = %s WHERE sequence = %s;"
+            cursor.execute(qry, (remarks, sequence))
+    db.maybe_commit()
+
+def del_pending_remark(db, sequence):
+
+    with db.conn.cursor() as cursor:
+        qry = "SELECT remarks FROM raw_lines WHERE sequence = %s;"
+        cursor.execute(qry, (sequence,))
+        row = cursor.fetchone()
+        if row is not None:
+            remarks = row[0]
+            rx = re.compile("^(<!-- @@DGL:PENDING@@ -->.*<!-- @@/DGL:PENDING@@ -->\n)")
+            m = rx.match(remarks)
+            if m is not None:
+                remarks = rx.sub("", remarks)
+                qry = "UPDATE raw_lines SET remarks = %s WHERE sequence = %s;"
+                cursor.execute(qry, (remarks, sequence))
+    db.maybe_commit()
+
 if __name__ == '__main__':
 
     print("Reading configuration")
-    surveys = configuration.surveys()
+    file_min_age = configuration.read().get('imports', {}).get('file_min_age', 10)
 
     print("Connecting to database")
     db = Datastore()
+    db.connect()
+    surveys = db.surveys()
 
     print("Reading surveys")
     for survey in surveys:
@@ -37,38 +70,100 @@
             print("No final P1/11 configuration")
             exit(0)
 
-        pattern = final_p111["pattern"]
-        rx = re.compile(pattern["regex"])
+        lineNameInfo = final_p111.get("lineNameInfo")
+        pattern = final_p111.get("pattern")
+        if not lineNameInfo:
+            if not pattern:
+                print("ERROR! Missing final.p111.lineNameInfo in project configuration. Cannot import final P111")
+                raise Exception("Missing final.p111.lineNameInfo")
+            else:
+                print("WARNING! No `lineNameInfo` in project configuration (final.p111). You should add it to the settings.")
+        rx = None
+        if pattern and pattern.get("regex"):
+            rx = re.compile(pattern["regex"])
+
+        if "pending" in survey["final"]:
+            pendingRx = re.compile(survey["final"]["pending"]["pattern"]["regex"])
 
         for fileprefix in final_p111["paths"]:
-            print(f"Path prefix: {fileprefix}")
+            realprefix = configuration.translate_path(fileprefix)
+            print(f"Path prefix: {fileprefix} → {realprefix}")
 
             for globspec in final_p111["globs"]:
-                for filepath in pathlib.Path(fileprefix).glob(globspec):
-                    filepath = str(filepath)
-                    print(f"Found {filepath}")
+                for physical_filepath in pathlib.Path(realprefix).glob(globspec):
+                    physical_filepath = str(physical_filepath)
+                    logical_filepath = configuration.untranslate_path(physical_filepath)
+                    print(f"Found {logical_filepath}")
 
-                    if not db.file_in_db(filepath):
-                        print("Importing")
+                    pending = False
+                    if pendingRx:
+                        pending = pendingRx.search(physical_filepath) is not None
 
-                        match = rx.match(os.path.basename(filepath))
-                        if not match:
-                            error_message = f"File path not match the expected format! ({filepath} ~ {pattern['regex']})"
-                            print(error_message, file=sys.stderr)
-                            print("This file will be ignored!")
-                            continue
+                    if not db.file_in_db(logical_filepath):
+
+                        age = time.time() - os.path.getmtime(physical_filepath)
+                        if age < file_min_age:
+                            print("Skipping file because too new", logical_filepath)
+                            continue
 
-                        file_info = dict(zip(pattern["captures"], match.groups()))
+                        print("Importing")
 
-                        p111_data = p111.from_file(filepath)
+                        if rx:
+                            match = rx.match(os.path.basename(logical_filepath))
+                            if not match:
+                                error_message = f"File path not match the expected format! ({logical_filepath} ~ {pattern['regex']})"
+                                print(error_message, file=sys.stderr)
+                                print("This file will be ignored!")
+                                continue
+
+                            file_info = dict(zip(pattern["captures"], match.groups()))
+                            file_info["meta"] = {}
+
+                        if lineNameInfo:
+                            basename = os.path.basename(physical_filepath)
+                            fields = lineNameInfo.get("fields", {})
+                            fixed = lineNameInfo.get("fixed")
+                            try:
+                                parsed_line = fwr.parse_line(basename, fields, fixed)
+                            except ValueError as err:
+                                parsed_line = "Line format error: " + str(err)
+                            if type(parsed_line) == str:
+                                print(parsed_line, file=sys.stderr)
+                                print("This file will be ignored!")
+                                continue
+
+                            file_info = {}
+                            file_info["sequence"] = parsed_line["sequence"]
+                            file_info["line"] = parsed_line["line"]
+                            del(parsed_line["sequence"])
+                            del(parsed_line["line"])
+                            file_info["meta"] = {
+                                "fileInfo": parsed_line
+                            }
+
+                        if pending:
+                            print("Skipping / removing final file because marked as PENDING", logical_filepath)
+                            db.del_sequence_final(file_info["sequence"])
+                            add_pending_remark(db, file_info["sequence"])
+                            continue
+                        else:
+                            del_pending_remark(db, file_info["sequence"])
+
+                        p111_data = p111.from_file(physical_filepath)
 
                         print("Saving")
 
                         p111_records = p111.p111_type("S", p111_data)
+                        file_info["meta"]["lineName"] = p111.line_name(p111_data)
 
-                        db.save_final_p111(p111_records, file_info, filepath, survey["epsg"])
+                        db.save_final_p111(p111_records, file_info, logical_filepath, survey["epsg"])
                     else:
                         print("Already in DB")
+                        if pending:
+                            print("Removing from database because marked as PENDING")
+                            db.del_sequence_final(file_info["sequence"])
+                            add_pending_remark(db, file_info["sequence"])
 
    print("Done")
```
@@ -12,6 +12,7 @@ import os
import sys
import pathlib
import re
import time
import configuration
import p190
from datastore import Datastore
@@ -20,6 +21,7 @@ if __name__ == '__main__':

print("Reading configuration")
surveys = configuration.surveys()
file_min_age = configuration.read().get('imports', {}).get('file_min_age', 10)

print("Connecting to database")
db = Datastore()
@@ -49,6 +51,12 @@ if __name__ == '__main__':
print(f"Found {filepath}")

if not db.file_in_db(filepath):

age = time.time() - os.path.getmtime(filepath)
if age < file_min_age:
print("Skipping file because too new", filepath)
continue

print("Importing")

match = rx.match(os.path.basename(filepath))
127
bin/import_map_layers.py
Executable file
@@ -0,0 +1,127 @@
#!/usr/bin/python3

"""
Import map layer data.

For each survey in configuration.surveys(), check for new
or modified map layer files and (re-)import them into the
database.
"""

import os
import sys
import pathlib
import re
import time
import json
import configuration
from datastore import Datastore

if __name__ == '__main__':
    """
    Imports map layers from the directories defined in the configuration object
    `imports.map.layers`. The content of that key is an object with the following
    structure:

    {
      layer1Name: [
        format: "geojson",
        path: "…", // Logical path to a directory
        globs: [
          "**/*.geojson", // List of globs matching map data files
          …
        ]
      ],

      layer2Name: …
      …
    }
    """

    def process (layer_name, layer, physical_filepath):
        physical_filepath = str(physical_filepath)
        logical_filepath = configuration.untranslate_path(physical_filepath)
        print(f"Found {logical_filepath}")

        if not db.file_in_db(logical_filepath):

            age = time.time() - os.path.getmtime(physical_filepath)
            if age < file_min_age:
                print("Skipping file because too new", logical_filepath)
                return

            print("Importing")

            file_info = {
                "type": "map_layer",
                "format": layer["format"],
                "name": layer_name,
                "tooltip": layer.get("tooltip"),
                "popup": layer.get("popup")
            }

            db.save_file_data(logical_filepath, json.dumps(file_info))

        else:
            file_info = db.get_file_data(logical_filepath)
            dirty = False
            if file_info:
                if file_info["name"] != layer_name:
                    print("Renaming to", layer_name)
                    file_info["name"] = layer_name
                    dirty = True
                if file_info.get("tooltip") != layer.get("tooltip"):
                    print("Changing tooltip to", layer.get("tooltip") or "null")
                    file_info["tooltip"] = layer.get("tooltip")
                    dirty = True
                if file_info.get("popup") != layer.get("popup"):
                    print("Changing popup to", layer.get("popup") or "null")
                    file_info["popup"] = layer.get("popup")
                    dirty = True

                if dirty:
                    db.save_file_data(logical_filepath, json.dumps(file_info))
                else:
                    print("Already in DB")

    print("Reading configuration")
    file_min_age = configuration.read().get('imports', {}).get('file_min_age', 10)

    print("Connecting to database")
    db = Datastore()
    surveys = db.surveys()

    print("Reading surveys")
    for survey in surveys:
        print(f'Survey: {survey["id"]} ({survey["schema"]})')

        db.set_survey(survey["schema"])

        try:
            map_layers = survey["imports"]["map"]["layers"]
        except KeyError:
            print("No map layers defined")
            continue

        for layer_name, layer_items in map_layers.items():

            for layer in layer_items:
                fileprefix = layer["path"]
                realprefix = configuration.translate_path(fileprefix)

                if os.path.isfile(realprefix):

                    process(layer_name, layer, realprefix)

                elif os.path.isdir(realprefix):

                    if not "globs" in layer:
                        layer["globs"] = [ "**/*.geojson" ]

                    for globspec in layer["globs"]:
                        for physical_filepath in pathlib.Path(realprefix).glob(globspec):
                            process(layer_name, layer, physical_filepath)

    print("Done")
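To make the shape above concrete, here is a hypothetical survey configuration fragment written as a Python literal (the layer name, path, globs and tooltip are invented for illustration); each layer name maps to a list of layer objects whose `path` is a logical path later resolved with `configuration.translate_path`:

```python
# Hypothetical imports.map.layers fragment, as consumed by the loop above.
survey = {
    "imports": {
        "map": {
            "layers": {
                "Obstructions": [
                    {
                        "format": "geojson",
                        "path": "/Data/map/obstructions",  # logical path
                        "globs": ["**/*.geojson"],
                        "tooltip": "name",
                    }
                ]
            }
        }
    }
}
```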
@@ -8,36 +8,59 @@ or modified preplots and (re-)import them into the database.
"""

from glob import glob
import os
import sys
import time
import configuration
import preplots
from datastore import Datastore

if __name__ == '__main__':
def preplots_sorter (preplot):
rank = {
"x-sl+csv": 10
}
return rank.get(preplot.get("type"), 0)

print("Reading configuration")
surveys = configuration.surveys()
if __name__ == '__main__':

print("Connecting to database")
db = Datastore()
surveys = db.surveys()

print("Reading configuration")
file_min_age = configuration.read().get('imports', {}).get('file_min_age', 10)

print("Reading surveys")
for survey in surveys:
print(f'Survey: {survey["id"]} ({survey["schema"]})')
db.set_survey(survey["schema"])
for file in survey["preplots"]:

# We sort the preplots so that ancillary line info always comes last,
# after the actual line + point data has been imported
for file in sorted(survey["preplots"], key=preplots_sorter):
realpath = configuration.translate_path(file["path"])

print(f"Preplot: {file['path']}")
if not db.file_in_db(file["path"]):

age = time.time() - os.path.getmtime(realpath)
if age < file_min_age:
print("Skipping file because too new", file["path"])
continue

print("Importing")
try:
preplot = preplots.from_file(file)
preplot = preplots.from_file(file, realpath)
except FileNotFoundError:
print(f"File does not exist: {file['path']}", file=sys.stderr)
continue

if type(preplot) is list:
print("Saving to DB")
db.save_preplots(preplot, file["path"], file["class"], survey["epsg"], file)
if file.get("type") == "x-sl+csv":
db.save_preplot_line_info(preplot, file["path"], file)
else:
db.save_preplots(preplot, file["path"], file["class"], survey["epsg"], file)
elif type(preplot) is str:
print(preplot)
else:
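The effect of `preplots_sorter` is easy to see in isolation: entries of type `x-sl+csv` rank 10, everything else ranks 0, so ancillary line info sorts after the point data. A minimal check (the file entries are invented):

```python
def preplots_sorter(preplot):
    rank = {"x-sl+csv": 10}
    return rank.get(preplot.get("type"), 0)

preplots = [
    {"type": "x-sl+csv", "path": "lines.csv"},  # ancillary line info
    {"type": "sps1", "path": "points.sps"},     # line + point data
]
print([p["path"] for p in sorted(preplots, key=preplots_sorter)])
# → ['points.sps', 'lines.csv']
```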
@@ -12,18 +12,20 @@ import os
import sys
import pathlib
import re
import time
import configuration
import p111
import fwr
from datastore import Datastore

if __name__ == '__main__':

print("Reading configuration")
surveys = configuration.surveys()
file_min_age = configuration.read().get('imports', {}).get('file_min_age', 10)

print("Connecting to database")
db = Datastore()
db.connect()
surveys = db.surveys()

print("Reading surveys")
for survey in surveys:
@@ -37,50 +39,95 @@ if __name__ == '__main__':
print("No raw P1/11 configuration")
exit(0)

pattern = raw_p111["pattern"]
rx = re.compile(pattern["regex"])
lineNameInfo = raw_p111.get("lineNameInfo")
pattern = raw_p111.get("pattern")
if not lineNameInfo:
if not pattern:
print("ERROR! Missing raw.p111.lineNameInfo in project configuration. Cannot import raw P111")
raise Exception("Missing raw.p111.lineNameInfo")
else:
print("WARNING! No `lineNameInfo` in project configuration (raw.p111). You should add it to the settings.")
rx = None
if pattern and pattern.get("regex"):
rx = re.compile(pattern["regex"])

if "ntbp" in survey["raw"]:
ntbpRx = re.compile(survey["raw"]["ntbp"]["pattern"]["regex"])

for fileprefix in raw_p111["paths"]:
print(f"Path prefix: {fileprefix}")
realprefix = configuration.translate_path(fileprefix)
print(f"Path prefix: {fileprefix} → {realprefix}")

for globspec in raw_p111["globs"]:
for filepath in pathlib.Path(fileprefix).glob(globspec):
filepath = str(filepath)
print(f"Found {filepath}")
for physical_filepath in pathlib.Path(realprefix).glob(globspec):
physical_filepath = str(physical_filepath)
logical_filepath = configuration.untranslate_path(physical_filepath)
print(f"Found {logical_filepath}")

if ntbpRx:
ntbp = ntbpRx.search(filepath) is not None
ntbp = ntbpRx.search(physical_filepath) is not None
else:
ntbp = False

if not db.file_in_db(filepath):
print("Importing")
if not db.file_in_db(logical_filepath):

match = rx.match(os.path.basename(filepath))
if not match:
error_message = f"File path not match the expected format! ({filepath} ~ {pattern['regex']})"
print(error_message, file=sys.stderr)
print("This file will be ignored!")
age = time.time() - os.path.getmtime(physical_filepath)
if age < file_min_age:
print("Skipping file because too new", logical_filepath)
continue

file_info = dict(zip(pattern["captures"], match.groups()))
print("Importing")

p111_data = p111.from_file(filepath)
if rx:
match = rx.match(os.path.basename(logical_filepath))
if not match:
error_message = f"File path not matching the expected format! ({logical_filepath} ~ {pattern['regex']})"
print(error_message, file=sys.stderr)
print("This file will be ignored!")
continue

file_info = dict(zip(pattern["captures"], match.groups()))
file_info["meta"] = {}

if lineNameInfo:
basename = os.path.basename(physical_filepath)
fields = lineNameInfo.get("fields", {})
fixed = lineNameInfo.get("fixed")
try:
parsed_line = fwr.parse_line(basename, fields, fixed)
except ValueError as err:
parsed_line = "Line format error: " + str(err)
if type(parsed_line) == str:
print(parsed_line, file=sys.stderr)
print("This file will be ignored!")
continue

file_info = {}
file_info["sequence"] = parsed_line["sequence"]
file_info["line"] = parsed_line["line"]
del(parsed_line["sequence"])
del(parsed_line["line"])
file_info["meta"] = {
"fileInfo": parsed_line
}

p111_data = p111.from_file(physical_filepath)

print("Saving")

p111_records = p111.p111_type("S", p111_data)
if len(p111_records):
file_info["meta"]["lineName"] = p111.line_name(p111_data)

db.save_raw_p111(p111_records, file_info, filepath, survey["epsg"], ntbp=ntbp)
db.save_raw_p111(p111_records, file_info, logical_filepath, survey["epsg"], ntbp=ntbp)
else:
print("No source records found in file")
else:
print("Already in DB")

# Update the NTBP status to whatever the latest is,
# as it might have changed.
db.set_ntbp(filepath, ntbp)
db.set_ntbp(logical_filepath, ntbp)
if ntbp:
print("Sequence is NTBP")
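The `lineNameInfo` fallback above recurs in the final P1/11 and SmartSource importers as well: the sequence and line numbers are recovered from the file's basename and promoted out of the parsed fields into `file_info`. A rough sketch of that promotion step, with an invented parser standing in for `fwr.parse_line` (whose real field layout comes from project configuration and may differ):

```python
# Stand-in for fwr.parse_line: assumes `fields` maps names to (start, width)
# slices of the basename. Hypothetical; the real fwr module may differ.
def parse_line(basename, fields, fixed=None):
    parsed = {name: basename[start:start + width]
              for name, (start, width) in fields.items()}
    parsed["sequence"] = int(parsed["sequence"])
    parsed["line"] = int(parsed["line"])
    return parsed

info = parse_line("0042-1234.p111", {"sequence": (0, 4), "line": (5, 4)})
file_info = {"sequence": info.pop("sequence"),
             "line": info.pop("line"),
             "meta": {"fileInfo": info}}
print(file_info)  # → {'sequence': 42, 'line': 1234, 'meta': {'fileInfo': {}}}
```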
@@ -12,6 +12,7 @@ import os
import sys
import pathlib
import re
import time
import configuration
import p190
from datastore import Datastore
@@ -20,6 +21,7 @@ if __name__ == '__main__':

print("Reading configuration")
surveys = configuration.surveys()
file_min_age = configuration.read().get('imports', {}).get('file_min_age', 10)

print("Connecting to database")
db = Datastore()
@@ -52,6 +54,12 @@ if __name__ == '__main__':
print(f"Found {filepath}")

if not db.file_in_db(filepath):

age = time.time() - os.path.getmtime(filepath)
if age < file_min_age:
print("Skipping file because too new", filepath)
continue

print("Importing")

match = rx.match(os.path.basename(filepath))
@@ -12,18 +12,20 @@ import os
import sys
import pathlib
import re
import time
import configuration
import smsrc
import fwr
from datastore import Datastore

if __name__ == '__main__':

print("Reading configuration")
surveys = configuration.surveys()
file_min_age = configuration.read().get('imports', {}).get('file_min_age', 10)

print("Connecting to database")
db = Datastore()
db.connect()
surveys = db.surveys()

print("Reading surveys")
for survey in surveys:
@@ -32,43 +34,80 @@ if __name__ == '__main__':
db.set_survey(survey["schema"])

try:
raw_smsrc = survey["raw"]["smsrc"]
raw_smsrc = survey["raw"]["source"]["smsrc"]["header"]
except KeyError:
print("No SmartSource data configuration")
continue

flags = 0
if "flags" in raw_smsrc:
configuration.rxflags(raw_smsrc["flags"])
# NOTE I've no idea what this is 🤔
# flags = 0
# if "flags" in raw_smsrc:
# configuration.rxflags(raw_smsrc["flags"])

pattern = raw_smsrc["pattern"]
rx = re.compile(pattern["regex"], flags)
lineNameInfo = raw_smsrc.get("lineNameInfo")
pattern = raw_smsrc.get("pattern")
rx = None
if pattern and pattern.get("regex"):
rx = re.compile(pattern["regex"])

for fileprefix in raw_smsrc["paths"]:
print(f"Path prefix: {fileprefix}")
realprefix = configuration.translate_path(fileprefix)
print(f"Path prefix: {fileprefix} → {realprefix}")

for globspec in raw_smsrc["globs"]:
for filepath in pathlib.Path(fileprefix).glob(globspec):
filepath = str(filepath)
print(f"Found {filepath}")
for physical_filepath in pathlib.Path(realprefix).glob(globspec):
physical_filepath = str(physical_filepath)
logical_filepath = configuration.untranslate_path(physical_filepath)
print(f"Found {logical_filepath}")

if not db.file_in_db(filepath):
print("Importing")
if not db.file_in_db(logical_filepath):

match = rx.match(os.path.basename(filepath))
if not match:
error_message = f"File path not matching the expected format! ({filepath} ~ {pattern['regex']})"
print(error_message, file=sys.stderr)
print("This file will be ignored!")
age = time.time() - os.path.getmtime(physical_filepath)
if age < file_min_age:
print("Skipping file because too new", logical_filepath)
continue

file_info = dict(zip(pattern["captures"], match.groups()))
print("Importing")

smsrc_records = smsrc.from_file(filepath)
if rx:
match = rx.match(os.path.basename(logical_filepath))
if not match:
error_message = f"File path not matching the expected format! ({logical_filepath} ~ {pattern['regex']})"
print(error_message, file=sys.stderr)
print("This file will be ignored!")
continue

file_info = dict(zip(pattern["captures"], match.groups()))
file_info["meta"] = {}

if lineNameInfo:
basename = os.path.basename(physical_filepath)
fields = lineNameInfo.get("fields", {})
fixed = lineNameInfo.get("fixed")
try:
parsed_line = fwr.parse_line(basename, fields, fixed)
except ValueError as err:
parsed_line = "Line format error: " + str(err)
if type(parsed_line) == str:
print(parsed_line, file=sys.stderr)
print("This file will be ignored!")
continue

file_info = {}
file_info["sequence"] = parsed_line["sequence"]
file_info["line"] = parsed_line["line"]
del(parsed_line["sequence"])
del(parsed_line["line"])
file_info["meta"] = {
"fileInfo": parsed_line
}

smsrc_records = smsrc.from_file(physical_filepath)

print("Saving")

db.save_raw_smsrc(smsrc_records, file_info, filepath)
db.save_raw_smsrc(smsrc_records, file_info, logical_filepath)
else:
print("Already in DB")
@@ -15,25 +15,4 @@ from datastore import Datastore

if __name__ == '__main__':

print("Reading configuration")
configs = configuration.files(include_archived = True)

print("Connecting to database")
db = Datastore()
#db.connect()

print("Reading surveys")
for config in configs:
filepath = config[0]
survey = config[1]
print(f'Survey: {survey["id"]} ({filepath})')
db.set_survey(survey["schema"])
if not db.file_in_db(filepath):
print("Saving to DB")
db.save_file_data(filepath, json.dumps(survey))
print("Applying survey configuration")
db.apply_survey_configuration()
else:
print("Already in DB")

print("Done")
print("This function is obsolete. Returning with no action")
48
bin/insert_event.py
Executable file
@@ -0,0 +1,48 @@
#!/usr/bin/python3

from datetime import datetime
from datastore import Datastore

def detect_schema (conn):
    with conn.cursor() as cursor:
        qry = "SELECT meta->>'_schema' AS schema, tstamp, age(current_timestamp, tstamp) age FROM real_time_inputs WHERE meta ? '_schema' AND age(current_timestamp, tstamp) < '02:00:00' ORDER BY tstamp DESC LIMIT 1"
        cursor.execute(qry)
        res = cursor.fetchone()
        if res and len(res):
            return res[0]
    return None

if __name__ == '__main__':
    import argparse

    ap = argparse.ArgumentParser()
    ap.add_argument("-s", "--schema", required=False, default=None, help="survey where to insert the event")
    ap.add_argument("-t", "--tstamp", required=False, default=None, help="event timestamp")
    ap.add_argument("-l", "--label", required=False, default=None, action="append", help="event label")
    ap.add_argument('remarks', type=str, nargs="+", help="event message")
    args = vars(ap.parse_args())

    db = Datastore()
    db.connect()

    if args["schema"]:
        schema = args["schema"]
    else:
        schema = detect_schema(db.conn)

    if args["tstamp"]:
        tstamp = args["tstamp"]
    else:
        tstamp = datetime.utcnow().isoformat()

    message = " ".join(args["remarks"])

    print("new event:", schema, tstamp, message, args["label"])

    if schema and tstamp and message:
        db.set_survey(schema)
        with db.conn.cursor() as cursor:
            qry = "INSERT INTO event_log (tstamp, remarks, labels) VALUES (%s, replace_placeholders(%s, %s, NULL, NULL), %s);"
            cursor.execute(qry, (tstamp, message, tstamp, args["label"]))
        db.maybe_commit()
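For reference, this is how the argument parser above handles a typical invocation (the survey name and message are invented); note that the positional `remarks` words are re-joined into a single event message:

```python
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-s", "--schema", required=False, default=None)
ap.add_argument("-t", "--tstamp", required=False, default=None)
ap.add_argument("-l", "--label", required=False, default=None, action="append")
ap.add_argument("remarks", type=str, nargs="+")

args = vars(ap.parse_args(
    ["-s", "survey_1", "-l", "weather", "Strong", "swell", "on", "the", "turn"]))
print(" ".join(args["remarks"]))  # → Strong swell on the turn
print(args["label"])              # → ['weather']
```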
@@ -7,7 +7,6 @@ P1/11 parsing functions.
import math
import re
from datetime import datetime, timedelta, timezone
from parse_fwr import parse_fwr

def _int (string):
return int(float(string))
@@ -153,6 +152,9 @@ def parse_line (string):
return None

def line_name(records):
return set([ r['Acquisition Line Name'] for r in p111_type("S", records) ]).pop()

def p111_type(type, records):
return [ r for r in records if r["type"] == type ]
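The two helpers combine as follows; `line_name` assumes every S record in a file carries the same acquisition line name (the records below are invented):

```python
def p111_type(type, records):
    return [r for r in records if r["type"] == type]

def line_name(records):
    return set([r['Acquisition Line Name'] for r in p111_type("S", records)]).pop()

records = [
    {"type": "S", "Acquisition Line Name": "SEQ0042"},
    {"type": "S", "Acquisition Line Name": "SEQ0042"},
]
print(line_name(records))  # → SEQ0042
```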
36
bin/p190.py
@@ -12,7 +12,7 @@ from parse_fwr import parse_fwr

def parse_p190_header (string):
"""Parse a generic P1/90 header record.

Returns a dictionary of fields.
"""
names = [ "record_type", "header_type", "header_type_modifier", "description", "data" ]
@@ -27,7 +27,7 @@ def parse_p190_type1 (string):
"doy", "time", "spare2" ]
record = parse_fwr(string, [1, 12, 3, 1, 1, 1, 6, 10, 11, 9, 9, 6, 3, 6, 1])
return dict(zip(names, record))

def parse_p190_rcv_group (string):
"""Parse a P1/90 Type 1 receiver group record."""
names = [ "record_type",
@@ -37,7 +37,7 @@ def parse_p190_rcv_group (string):
"streamer_id" ]
record = parse_fwr(string, [1, 4, 9, 9, 4, 4, 9, 9, 4, 4, 9, 9, 4, 1])
return dict(zip(names, record))

def parse_line (string):
type = string[0]
if string[:3] == "EOF":
@@ -52,7 +52,7 @@ def parse_line (string):

def p190_type(type, records):
return [ r for r in records if r["record_type"] == type ]

def p190_header(code, records):
return [ h for h in p190_type("H", records) if h["header_type"]+h["header_type_modifier"] == code ]

@@ -86,15 +86,15 @@ def normalise_record(record):
# These are probably strings
elif "strip" in dir(record[key]):
record[key] = record[key].strip()

return record

def normalise(records):
for record in records:
normalise_record(record)

return records

def from_file(path, only_records=None, shot_range=None, with_objrefs=False):
records = []
with open(path) as fd:
@@ -102,10 +102,10 @@ def from_file(path, only_records=None, shot_range=None, with_objrefs=False):
line = fd.readline()
while line:
cnt = cnt + 1

if line == "EOF":
break

record = parse_line(line)
if record is not None:
if only_records:
@@ -121,9 +121,9 @@ def from_file(path, only_records=None, shot_range=None, with_objrefs=False):

records.append(record)
line = fd.readline()

return records

def apply_tstamps(recordset, tstamp=None, fix_bad_seconds=False):
#print("tstamp", tstamp, type(tstamp))
if type(tstamp) is int:
@@ -161,16 +161,16 @@ def apply_tstamps(recordset, tstamp=None, fix_bad_seconds=False):
record["tstamp"] = ts
prev[object_id(record)] = doy
break

return recordset

def dms(value):
# 591544.61N
hemisphere = 1 if value[-1] in "NnEe" else -1
seconds = float(value[-6:-1])
minutes = int(value[-8:-6])
degrees = int(value[:-8])

return (degrees + minutes/60 + seconds/3600) * hemisphere

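A worked check of `dms()` on the format hinted at in its comment, DDMMSS.ssH (the second value is an invented longitude):

```python
def dms(value):
    hemisphere = 1 if value[-1] in "NnEe" else -1
    seconds = float(value[-6:-1])
    minutes = int(value[-8:-6])
    degrees = int(value[:-8])
    return (degrees + minutes/60 + seconds/3600) * hemisphere

print(dms("591544.61N"))   # 59°15'44.61"N → ≈ 59.262392
print(dms("0021230.00W"))  # 2°12'30.00"W  → ≈ -2.208333
```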
def tod(record):
@@ -183,7 +183,7 @@ def tod(record):
m = int(time[2:4])
s = float(time[4:])
return d*86400 + h*3600 + m*60 + s

def duration(record0, record1):
ts0 = tod(record0)
ts1 = tod(record1)
@@ -198,10 +198,10 @@ def azimuth(record0, record1):
x0, y0 = float(record0["easting"]), float(record0["northing"])
x1, y1 = float(record1["easting"]), float(record1["northing"])
return math.degrees(math.atan2(x1-x0, y1-y0)) % 360

def speed(record0, record1, knots=False):
scale = 3600/1852 if knots else 1
t0 = tod(record0)
t1 = tod(record1)
return (distance(record0, record1) / math.fabs(t1-t0)) * scale
@@ -1,21 +0,0 @@
#!/usr/bin/python3

def parse_fwr (string, widths, start=0):
    """Parse a fixed-width record.

    string: the string to parse.
    widths: a list of record widths. A negative width denotes a field to be skipped.
    start: optional start index.

    Returns a list of strings.
    """
    results = []
    current_index = start
    for width in widths:
        if width > 0:
            results.append(string[current_index : current_index + width])
            current_index += width
        else:
            current_index -= width

    return results
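Given the definition above, positive widths capture fields and negative widths skip characters; a quick check:

```python
def parse_fwr(string, widths, start=0):
    results = []
    current_index = start
    for width in widths:
        if width > 0:
            results.append(string[current_index : current_index + width])
            current_index += width
        else:
            current_index -= width
    return results

print(parse_fwr("ABCDEFGHIJ", [1, 4, -2, 3]))
# → ['A', 'BCDE', 'HIJ']  ('F' and 'G' are skipped)
```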
@@ -1,14 +1,51 @@
import sps
import fwr
import delimited

"""
Preplot importing functions.
"""

def from_file (file):
if not "type" in file or file["type"] == "sps":
records = sps.from_file(file["path"], file["format"] if "format" in file else None )

def is_fixed_width (file):
fixed_width_types = [ "sps1", "sps21", "p190", "fixed-width" ]
return type(file) == dict and "type" in file and file["type"] in fixed_width_types

def is_delimited (file):
delimited_types = [ "csv", "p111", "x-sl+csv" ]
return type(file) == dict and "type" in file and file["type"] in delimited_types

def from_file (file, realpath = None):
"""
Return a list of dicts, where each dict has the structure:
{
"line_name": <int>,
"points": [
{
"line_name": <int>,
"point_number": <int>,
"easting": <float>,
"northing": <float>
},
…
]
}
On error, return a string describing the error condition.
"""

filepath = realpath or file["path"]
if is_fixed_width(file):
records = fwr.from_file(filepath, file)
elif is_delimited(file):
records = delimited.from_file(filepath, file)
else:
return "Not an SPS file"
return "Unrecognised file format"

if type(records) == str:
# This is an error message
return records

if file.get("type") == "x-sl+csv":
return records

lines = []
line_names = set([r["line_name"] for r in records])
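The two predicates drive the parser dispatch in `from_file`; with invented file descriptors:

```python
def is_fixed_width(file):
    return type(file) == dict and "type" in file \
        and file["type"] in ["sps1", "sps21", "p190", "fixed-width"]

def is_delimited(file):
    return type(file) == dict and "type" in file \
        and file["type"] in ["csv", "p111", "x-sl+csv"]

print(is_fixed_width({"type": "sps1", "path": "preplot.sps"}))  # True  → fwr.from_file
print(is_delimited({"type": "x-sl+csv", "path": "lines.csv"}))  # True  → delimited.from_file
print(is_delimited({"path": "unknown.dat"}))                    # False → "Unrecognised file format"
```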
@@ -13,21 +13,27 @@ from datastore import Datastore

if __name__ == '__main__':

print("Reading configuration")
surveys = configuration.surveys()

print("Connecting to database")
db = Datastore()

print("Reading configuration")
surveys = db.surveys()

print("Reading surveys")
for survey in surveys:
print(f'Survey: {survey["id"]} ({survey["schema"]})')
db.set_survey(survey["schema"])

for file in db.list_files():
path = file[0]
if not os.path.exists(path):
print(path, "NOT FOUND")
db.del_file(path)
try:
path = configuration.translate_path(file[0])
if not os.path.exists(path):
print(path, "NOT FOUND")
db.del_file(file[0])
except TypeError:
# In case the logical path no longer matches
# the Dougal configuration.
print(file[0], "COULD NOT BE TRANSLATED TO A PHYSICAL PATH. DELETING")
db.del_file(file[0])

print("Done")
@@ -1,5 +1,6 @@
#!/bin/bash

DOUGAL_ROOT=${DOUGAL_ROOT:-$(dirname "$0")/..}

BINDIR="$DOUGAL_ROOT/bin"
@@ -8,6 +9,20 @@ LOCKFILE=${LOCKFILE:-$VARDIR/runner.lock}

[ -f ~/.profile ] && . ~/.profile

DOUGAL_LOG_TAG="dougal.runner[$$]"

# Only send output to the logger if we have the appropriate
# configuration set.
if [[ -n "$DOUGAL_LOG_TAG" && -n "$DOUGAL_LOG_FACILITY" ]]; then
function _logger () {
logger $*
}
else
function _logger () {
: # This is the Bash null command
}
fi

function tstamp () {
date -u +%Y-%m-%dT%H:%M:%SZ
}
@@ -18,26 +33,44 @@ function prefix () {

function print_log () {
printf "$(prefix)\033[36m%s\033[0m\n" "$*"
_logger -t "$DOUGAL_LOG_TAG" -p "$DOUGAL_LOG_FACILITY.info" "$*"
}

function print_info () {
printf "$(prefix)\033[0m%s\n" "$*"
_logger -t "$DOUGAL_LOG_TAG" -p "$DOUGAL_LOG_FACILITY.debug" "$*"
}

function print_warning () {
printf "$(prefix)\033[33;1m%s\033[0m\n" "$*"
_logger -t "$DOUGAL_LOG_TAG" -p "$DOUGAL_LOG_FACILITY.warning" "$*"
}

function print_error () {
printf "$(prefix)\033[31m%s\033[0m\n" "$*"
_logger -t "$DOUGAL_LOG_TAG" -p "$DOUGAL_LOG_FACILITY.error" "$*"
}

function run () {
PROGNAME=$(basename "$1")
PROGNAME=${PROGNAME:-$(basename "$1")}

STDOUTLOG="$VARDIR/$PROGNAME.out"
STDERRLOG="$VARDIR/$PROGNAME.err"

"$1" >"$STDOUTLOG" 2>"$STDERRLOG" || {
# What follows runs the command that we have been given (with any arguments passed)
# and logs:
# * stdout to $STDOUTLOG (a temporary file) and possibly to syslog, if enabled.
# * stderr to $STDERRLOG (a temporary file) and possibly to syslog, if enabled.
#
# When logging to syslog, stdout goes as debug level and stderr as warning (not error)
#
# The temporary file is used in case the command fails, at which point we try to log
# a warning in GitLab's alerts facility.

$* \
> >(tee $STDOUTLOG |_logger -t "dougal.runner.$PROGNAME[$$]" -p "$DOUGAL_LOG_FACILITY.debug") \
2> >(tee $STDERRLOG |_logger -t "dougal.runner.$PROGNAME[$$]" -p "$DOUGAL_LOG_FACILITY.warning") || {

print_error "Failed: $PROGNAME"
cat $STDOUTLOG
cat $STDERRLOG
@@ -52,11 +85,17 @@ function run () {

exit 2
}
# cat $STDOUTLOG

unset PROGNAME
rm $STDOUTLOG $STDERRLOG
}

function cleanup () {
if [[ -f $LOCKFILE ]]; then
rm "$LOCKFILE"
fi
}

if [[ -f $LOCKFILE ]]; then
PID=$(cat "$LOCKFILE")
if pgrep -F "$LOCKFILE"; then
@@ -74,6 +113,13 @@ echo "$$" > "$LOCKFILE" || {
}
print_info "Start run"

print_log "Check if data is accessible"
$BINDIR/check_mounts_present.py || {
print_warning "Import mounts not accessible. Inhibiting all tasks!"
cleanup
exit 253
}

print_log "Purge deleted files"
run $BINDIR/purge_deleted_files.py

@@ -86,33 +132,47 @@ run $BINDIR/import_preplots.py
print_log "Import raw P1/11"
run $BINDIR/import_raw_p111.py

print_log "Import raw P1/90"
run $BINDIR/import_raw_p190.py
#print_log "Import raw P1/90"
#run $BINDIR/import_raw_p190.py

print_log "Import final P1/11"
run $BINDIR/import_final_p111.py

print_log "Import final P1/90"
run $BINDIR/import_final_p190.py
#print_log "Import final P1/90"
#run $BINDIR/import_final_p190.py

print_log "Import SmartSource data"
run $BINDIR/import_smsrc.py

if [[ -z "$RUNNER_NOEXPORT" ]]; then
print_log "Export system data"
run $BINDIR/system_exports.py
fi
print_log "Import map user layers"
run $BINDIR/import_map_layers.py

if [[ -n "$RUNNER_IMPORT" ]]; then
print_log "Import system data"
run $BINDIR/system_imports.py
fi
# if [[ -z "$RUNNER_NOEXPORT" ]]; then
# print_log "Export system data"
# run $BINDIR/system_exports.py
# fi

print_log "Export QC data"
run $BINDIR/human_exports_qc.py
# if [[ -n "$RUNNER_IMPORT" ]]; then
# print_log "Import system data"
# run $BINDIR/system_imports.py
# fi

print_log "Export sequence data"
run $BINDIR/human_exports_seis.py
# print_log "Export QC data"
# run $BINDIR/human_exports_qc.py

# print_log "Export sequence data"
# run $BINDIR/human_exports_seis.py

print_log "Process ASAQC queue"
# Run insecure in test mode:
# export NODE_TLS_REJECT_UNAUTHORIZED=0
PROGNAME=asaqc_queue run $DOUGAL_ROOT/lib/www/server/queues/asaqc/index.js

print_log "Run database housekeeping actions"
run $BINDIR/housekeep_database.py

print_log "Run QCs"
PROGNAME=run_qc run $DOUGAL_ROOT/lib/www/server/lib/qc/index.js

rm "$LOCKFILE"
51
bin/sps.py
@@ -1,51 +0,0 @@
#!/usr/bin/python3

"""
SPS importing functions.

And by SPS, we mean more or less any line-delimited, fixed-width record format.
"""

import builtins
from parse_fwr import parse_fwr

def int (v):
    return builtins.int(float(v))

def parse_line (string, spec):
    """Parse a line from an SPS file."""
    names = spec["names"]
    widths = spec["widths"]
    normalisers = spec["normalisers"]
    record = [ t[0](t[1]) for t in zip(normalisers, parse_fwr(string, widths)) ]
    return dict(zip(names, record))

def from_file(path, spec = None):
    if spec is None:
        spec = {
            "names": [ "line_name", "point_number", "easting", "northing" ],
            "widths": [ -1, 10, 10, -25, 10, 10 ],
            "normalisers": [ int, int, float, float ]
        }
    else:
        normaliser_tokens = [ "int", "float", "str", "bool" ]
        spec["normalisers"] = [ eval(t) for t in spec["types"] if t in normaliser_tokens ]

    records = []
    with open(path) as fd:
        cnt = 0
        line = fd.readline()
        while line:
            cnt = cnt+1

            if line == "EOF":
                break

            record = parse_line(line, spec)
            if record is not None:
                records.append(record)

            line = fd.readline()

    del spec["normalisers"]
    return records
@@ -24,6 +24,7 @@ locals().update(configuration.vars())
exportables = {
"public": {
"projects": [ "meta" ],
"info": None,
"real_time_inputs": None
},
"survey": {
@@ -32,12 +33,13 @@ exportables = {
"preplot_lines": [ "remarks", "ntba", "meta" ],
"preplot_points": [ "ntba", "meta" ],
"raw_lines": [ "remarks", "meta" ],
"raw_shots": [ "meta" ]
"raw_shots": [ "meta" ],
"planned_lines": None
}
}

def primary_key (table, cursor):

# https://wiki.postgresql.org/wiki/Retrieve_primary_key_columns
qry = """
SELECT a.attname, format_type(a.atttypid, a.atttypmod) AS data_type
@@ -48,7 +50,7 @@ def primary_key (table, cursor):
WHERE i.indrelid = %s::regclass
AND i.indisprimary;
"""

cursor.execute(qry, (table,))
return cursor.fetchall()
@@ -40,6 +40,10 @@ if __name__ == '__main__':
continue

try:
for table in exportables:
path = os.path.join(pathPrefix, table)
if os.path.exists(path):
cursor.execute(f"DELETE FROM {table};")
for table in exportables:
path = os.path.join(pathPrefix, table)
print(" ← ", path, " → ", table)
@@ -19,6 +19,7 @@ locals().update(configuration.vars())
exportables = {
"public": {
"projects": [ "meta" ],
"info": None,
"real_time_inputs": None
},
"survey": {
@@ -27,12 +28,13 @@ exportables = {
"preplot_lines": [ "remarks", "ntba", "meta" ],
"preplot_points": [ "ntba", "meta" ],
"raw_lines": [ "remarks", "meta" ],
"raw_shots": [ "meta" ]
"raw_shots": [ "meta" ],
"planned_lines": None
}
}

def primary_key (table, cursor):

# https://wiki.postgresql.org/wiki/Retrieve_primary_key_columns
qry = """
SELECT a.attname, format_type(a.atttypid, a.atttypmod) AS data_type
@@ -43,13 +45,13 @@ def primary_key (table, cursor):
WHERE i.indrelid = %s::regclass
AND i.indisprimary;
"""

cursor.execute(qry, (table,))
return cursor.fetchall()

def import_table(fd, table, columns, cursor):
pk = [ r[0] for r in primary_key(table, cursor) ]

# Create temporary table to import into
temptable = "import_"+table
print("Creating temporary table", temptable)
@@ -59,29 +61,29 @@ def import_table(fd, table, columns, cursor):
AS SELECT {', '.join(pk + columns)} FROM {table}
WITH NO DATA;
"""

#print(qry)
cursor.execute(qry)

# Import into the temp table
print("Import data into temporary table")
cursor.copy_from(fd, temptable)

# Update the destination table
print("Updating destination table")
setcols = ", ".join([ f"{c} = t.{c}" for c in columns ])
wherecols = " AND ".join([ f"{table}.{c} = t.{c}" for c in pk ])

qry = f"""
UPDATE {table}
SET {setcols}
FROM {temptable} t
WHERE {wherecols};
"""

#print(qry)
cursor.execute(qry)

if __name__ == '__main__':

@@ -96,16 +98,21 @@ if __name__ == '__main__':
with db.conn.cursor() as cursor:
columns = exportables["public"][table]
path = os.path.join(VARDIR, "-"+table)
with open(path, "rb") as fd:
print(" →→ ", path, " ←← ", table, columns)
if columns is not None:
import_table(fd, table, columns, cursor)
else:
try:
print(f"Copying from {path} into {table}")
cursor.copy_from(fd, table)
except psycopg2.errors.UniqueViolation:
print(f"It looks like table {table} may have already been imported. Skipping it.")
try:
with open(path, "rb") as fd:
print(" →→ ", path, " ←← ", table, columns)
if columns is not None:
import_table(fd, table, columns, cursor)
else:
try:
print(f"Copying from {path} into {table}")
cursor.copy_from(fd, table)
except psycopg2.errors.UniqueViolation:
print(f"It looks like table {table} may have already been imported. Skipping it.")
except FileNotFoundError:
print(f"File not found. Skipping {path}")

db.conn.commit()

print("Reading surveys")
for survey in surveys:
@@ -123,17 +130,20 @@ if __name__ == '__main__':
columns = exportables["survey"][table]
path = os.path.join(pathPrefix, "-"+table)
print(" ←← ", path, " →→ ", table, columns)

with open(path, "rb") as fd:
if columns is not None:
import_table(fd, table, columns, cursor)
else:
try:
print(f"Copying from {path} into {table}")
cursor.copy_from(fd, table)
except psycopg2.errors.UniqueViolation:
print(f"It looks like table {table} may have already been imported. Skipping it.")

try:
with open(path, "rb") as fd:
if columns is not None:
import_table(fd, table, columns, cursor)
else:
try:
print(f"Copying from {path} into {table}")
cursor.copy_from(fd, table)
except psycopg2.errors.UniqueViolation:
print(f"It looks like table {table} may have already been imported. Skipping it.")
except FileNotFoundError:
print(f"File not found. Skipping {path}")

# If we don't commit the data does not actually get copied
db.conn.commit()
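The update step in `import_table` stitches its SQL together from the primary key and the column list; for a hypothetical table this produces:

```python
# Hypothetical table/pk/columns; mirrors the f-strings in import_table above.
table, temptable = "raw_lines", "import_raw_lines"
pk, columns = ["sequence"], ["remarks", "meta"]

setcols = ", ".join([f"{c} = t.{c}" for c in columns])
wherecols = " AND ".join([f"{table}.{c} = t.{c}" for c in pk])
print(f"UPDATE {table} SET {setcols} FROM {temptable} t WHERE {wherecols};")
# → UPDATE raw_lines SET remarks = t.remarks, meta = t.meta
#   FROM import_raw_lines t WHERE raw_lines.sequence = t.sequence;
```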
65
etc/config.example.yaml
Normal file
@@ -0,0 +1,65 @@

db:
  connection_string: "host=localhost port=5432 dbname=dougal user=postgres"

webhooks:
  alert:
    url: https://gitlab.com/wgp/dougal/software/alerts/notify.json
    authkey: ""
    # The authorisation key can be provided here or read from the
    # environment variable GITLAB_ALERTS_AUTHKEY. The environment
    # variable has precedence. It can be saved under the user's
    # Bash .profile. This is the recommended way to avoid accidentally
    # committing a security token into the git repository.

navigation:
  headers:
    -
      type: udp
      port: 30000
      meta:
        # Anything here gets passed as options to the packet
        # saving routine.
        epsg: 23031 # Assume this CRS for unqualified E/N data
        # Heuristics to apply to detect survey when offline
        offline_survey_heuristics: "nearest_preplot"
        # Apply the heuristics at most once every…
        offline_survey_detect_interval: 10000 # ms

imports:
  # For a file to be imported, it must have been last modified at
  # least this many seconds ago.
  file_min_age: 60

  # These paths refer to remote mounts which must be present in order
  # for imports to work. If any of these paths are empty, import actions
  # (including data deletion) will be inhibited. This is to cope with
  # things like transient network failures.
  mounts:
    - /srv/mnt/Data

  # These paths can be exposed to end users via the API. They should
  # contain the locations where project data, or any other user data
  # that needs to be accessible by Dougal, is located.
  #
  # This key can be either a string or an object:
  # - If a string, it points to the root path for Dougal-accessible data.
  # - If an object, there is an implicit root and the first-level
  #   paths are denoted by the keys, with the values being their
  #   respective physical paths.
  # Non-absolute paths are relative to $DOUGAL_ROOT.
  paths: /srv/mnt/Data

queues:
  asaqc:
    request:
      url: "https://api.gateway.equinor.com/vt/v1/api/upload-file-encoded"
      args:
        method: POST
        headers:
          Content-Type: application/json
      httpsAgent: # The paths here are relative to $DOUGAL_ROOT
        cert: etc/ssl/asaqc.crt
        key: etc/ssl/asaqc.key
@@ -1,24 +0,0 @@

db:
  connection_string: "host=localhost port=5432 dbname=dougal user=postgres"

webhooks:
  alert:
    url: https://gitlab.com/wgp/dougal/software/alerts/notify.json
    authkey: ""
    # The authorisation key can be provided here or read from the
    # environment variable GITLAB_ALERTS_AUTHKEY. The environment
    # variable has precedence. It can be saved under the user's
    # Bash .profile. This is the recommended way to avoid accidentally
    # committing a security token into the git repository.

navigation:
  headers:
    -
      type: udp
      port: 30000
      meta:
        # Anything here gets passed as options to the packet
        # saving routine.
        epsg: 23031 # Assume this CRS for unqualified E/N data
121
etc/db/README.md
@@ -19,3 +19,124 @@ Created with:
```bash
SCHEMA_NAME=survey_X EPSG_CODE=XXXXX $DOUGAL_ROOT/sbin/dump_schema.sh
```

## To create a new Dougal database

Ensure that the following packages are installed:

* `postgresql*-postgis-utils`
* `postgresql*-postgis`
* `postgresql*-contrib` # For B-trees

```bash
psql -U postgres <./database-template.sql
psql -U postgres <./database-version.sql
```

---

# Upgrading PostgreSQL

The following is based on https://en.opensuse.org/SDB:PostgreSQL#Upgrading_major_PostgreSQL_version

```bash
# The following bash code should be checked and executed
# line for line whenever you do an upgrade. The example
# shows the upgrade process from an original installation
# of version 12 up to version 14.

# Install the new server as well as the required postgresql-contrib packages:
zypper in postgresql14-server postgresql14-contrib postgresql12-contrib

# If not yet done, create a new global PostgreSQL configuration directory...
mkdir /etc/postgresql
# and copy the original files to this global directory
cd /srv/pgsql/data
for i in pg_hba.conf pg_ident.conf postgresql.conf postgresql.auto.conf ; do cp -a $i /etc/postgresql/$i ; done

# Now create a new data directory and initialize it for use with the new server
install -d -m 0700 -o postgres -g postgres /srv/pgsql/data14
cd /srv/pgsql/data14
sudo -u postgres /usr/lib/postgresql14/bin/initdb .

# Replace the newly generated files by symlinks to the global files.
# After doing so, you may check the difference between the created backup files
# and the files from the former installation
for i in pg_hba.conf pg_ident.conf postgresql.conf postgresql.auto.conf ; do old $i ; ln -s /etc/postgresql/$i .; done

# Copy over special thesaurus files if any exist.
#cp -a /usr/share/postgresql12/tsearch_data/my_thesaurus_german.ths /usr/share/postgresql14/tsearch_data/

# Now it's time to disable the service...
systemctl stop postgresql.service

# And to start the migration. Please ensure the directories match your upgrade path
sudo -u postgres /usr/lib/postgresql14/bin/pg_upgrade --link \
  --old-bindir="/usr/lib/postgresql12/bin" \
  --new-bindir="/usr/lib/postgresql14/bin" \
  --old-datadir="/srv/pgsql/data/" \
  --new-datadir="/srv/pgsql/data14/"

# NOTE: If getting the following error:
#   lc_collate values for database "postgres" do not match: old "en_US.UTF-8", new "C"
# then:
#   cd ..
#   rm -rf /srv/pgsql/data14
#   install -d -m 0700 -o postgres -g postgres /srv/pgsql/data14
#   cd /srv/pgsql/data14
#   sudo -u postgres /usr/lib/postgresql14/bin/initdb --locale=en_US.UTF-8 .
#
# and repeat the migration command

# After successfully migrating the data...
cd ..
# if not already symlinked, move the old data to a versioned directory matching
# your old installation...
mv data data12
# and set a symlink to the new data directory
ln -sf data14/ data

# Now start the new service
systemctl start postgresql.service

# If everything has been successful, you should uninstall old packages...
#zypper rm -u postgresql12 postgresql13
# and remove old data directories
#rm -rf /srv/pgsql/data_OLD_POSTGRES_VERSION_NUMBER

# For good measure:
sudo -u postgres /usr/lib/postgresql14/bin/vacuumdb --all --analyze-in-stages

# If update_extensions.sql exists, apply it.
```

# Restoring from backup

## Whole database

Ensure that nothing is connected to the database.

```bash
psql -U postgres --dbname postgres <<EOF
-- Database: dougal

DROP DATABASE IF EXISTS dougal;

CREATE DATABASE dougal
    WITH
    OWNER = postgres
    ENCODING = 'UTF8'
    LC_COLLATE = 'en_GB.UTF-8'
    LC_CTYPE = 'en_GB.UTF-8'
    TABLESPACE = pg_default
    CONNECTION LIMIT = -1;

ALTER DATABASE dougal
    SET search_path TO "$user", public, topology;

EOF

# Adjust --jobs according to host machine
pg_restore -U postgres --dbname dougal --clean --if-exists --jobs 32 /path/to/backup

```
|
||||
-- PostgreSQL database dump
|
||||
--
|
||||
|
||||
-- Dumped from database version 12.4
|
||||
-- Dumped by pg_dump version 12.4
|
||||
-- Dumped from database version 14.2
|
||||
-- Dumped by pg_dump version 14.2
|
||||
|
||||
SET statement_timeout = 0;
|
||||
SET lock_timeout = 0;
|
||||
@@ -102,20 +102,6 @@ CREATE EXTENSION IF NOT EXISTS postgis WITH SCHEMA public;
|
||||
COMMENT ON EXTENSION postgis IS 'PostGIS geometry, geography, and raster spatial types and functions';
|
||||
|
||||
|
||||
--
|
||||
-- Name: postgis_raster; Type: EXTENSION; Schema: -; Owner: -
|
||||
--
|
||||
|
||||
CREATE EXTENSION IF NOT EXISTS postgis_raster WITH SCHEMA public;
|
||||
|
||||
|
||||
--
|
||||
-- Name: EXTENSION postgis_raster; Type: COMMENT; Schema: -; Owner:
|
||||
--
|
||||
|
||||
COMMENT ON EXTENSION postgis_raster IS 'PostGIS raster types and functions';
|
||||
|
||||
|
||||
--
|
||||
-- Name: postgis_sfcgal; Type: EXTENSION; Schema: -; Owner: -
|
||||
--
|
||||
@@ -144,6 +130,221 @@ CREATE EXTENSION IF NOT EXISTS postgis_topology WITH SCHEMA topology;
|
||||
COMMENT ON EXTENSION postgis_topology IS 'PostGIS topology spatial types and functions';
|
||||
|
||||
|
||||
--
|
||||
-- Name: queue_item_status; Type: TYPE; Schema: public; Owner: postgres
|
||||
--
|
||||
|
||||
CREATE TYPE public.queue_item_status AS ENUM (
|
||||
'queued',
|
||||
'cancelled',
|
||||
'failed',
|
||||
'sent'
|
||||
);
|
||||
|
||||
|
||||
ALTER TYPE public.queue_item_status OWNER TO postgres;
|
||||
|
||||
--
|
||||
-- Name: event_meta(timestamp with time zone); Type: FUNCTION; Schema: public; Owner: postgres
|
||||
--
|
||||
|
||||
CREATE FUNCTION public.event_meta(tstamp timestamp with time zone) RETURNS jsonb
|
||||
LANGUAGE plpgsql
|
||||
AS $$
|
||||
BEGIN
|
||||
RETURN event_meta(tstamp, NULL, NULL);
|
||||
END;
|
||||
$$;
|
||||
|
||||
|
||||
ALTER FUNCTION public.event_meta(tstamp timestamp with time zone) OWNER TO postgres;
|
||||
|
||||
--
|
||||
-- Name: FUNCTION event_meta(tstamp timestamp with time zone); Type: COMMENT; Schema: public; Owner: postgres
|
||||
--
|
||||
|
||||
COMMENT ON FUNCTION public.event_meta(tstamp timestamp with time zone) IS 'Overload of event_meta (timestamptz, integer, integer) for use when searching by timestamp.';
|
||||
|
||||
|
||||
--
|
||||
-- Name: event_meta(integer, integer); Type: FUNCTION; Schema: public; Owner: postgres
|
||||
--
|
||||
|
||||
CREATE FUNCTION public.event_meta(sequence integer, point integer) RETURNS jsonb
|
||||
LANGUAGE plpgsql
|
||||
AS $$
|
||||
BEGIN
|
||||
RETURN event_meta(NULL, sequence, point);
|
||||
END;
|
||||
$$;
|
||||
|
||||
|
||||
ALTER FUNCTION public.event_meta(sequence integer, point integer) OWNER TO postgres;
|
||||
|
||||
--
|
||||
-- Name: FUNCTION event_meta(sequence integer, point integer); Type: COMMENT; Schema: public; Owner: postgres
|
||||
--
|
||||
|
||||
COMMENT ON FUNCTION public.event_meta(sequence integer, point integer) IS 'Overload of event_meta (timestamptz, integer, integer) for use when searching by sequence / point.';
|
||||
|
||||
|
||||
--
|
||||
-- Name: event_meta(timestamp with time zone, integer, integer); Type: FUNCTION; Schema: public; Owner: postgres
|
||||
--
|
||||
|
||||
CREATE FUNCTION public.event_meta(tstamp timestamp with time zone, sequence integer, point integer) RETURNS jsonb
|
||||
LANGUAGE plpgsql
|
||||
AS $$
|
||||
DECLARE
|
||||
result jsonb;
|
||||
-- Tolerance is hard-coded, at least until a need to expose arises.
|
||||
tolerance numeric;
|
||||
BEGIN
|
||||
tolerance := 3; -- seconds
|
||||
|
||||
-- We search by timestamp if we can, as that's a lot quicker
|
||||
IF tstamp IS NOT NULL THEN
|
||||
|
||||
SELECT meta
|
||||
INTO result
|
||||
FROM real_time_inputs rti
|
||||
WHERE
|
||||
rti.tstamp BETWEEN (event_meta.tstamp - tolerance * interval '1 second') AND (event_meta.tstamp + tolerance * interval '1 second')
|
||||
ORDER BY abs(extract('epoch' FROM rti.tstamp - event_meta.tstamp ))
|
||||
LIMIT 1;
|
||||
|
||||
ELSE
|
||||
|
||||
SELECT meta
|
||||
INTO result
|
||||
FROM real_time_inputs rti
|
||||
WHERE
|
||||
(meta->>'_sequence')::integer = event_meta.sequence AND
|
||||
(meta->>'_point')::integer = event_meta.point
|
||||
ORDER BY rti.tstamp DESC
|
||||
LIMIT 1;
|
||||
|
||||
END IF;
|
||||
|
||||
RETURN result;
|
||||
|
||||
END;
|
||||
$$;
|
||||
|
||||
|
||||
ALTER FUNCTION public.event_meta(tstamp timestamp with time zone, sequence integer, point integer) OWNER TO postgres;
|
||||
|
||||
--
|
||||
-- Name: FUNCTION event_meta(tstamp timestamp with time zone, sequence integer, point integer); Type: COMMENT; Schema: public; Owner: postgres
|
||||
--
|
||||
|
||||
COMMENT ON FUNCTION public.event_meta(tstamp timestamp with time zone, sequence integer, point integer) IS 'Return the real-time event metadata associated with a sequence / point in the current project or
|
||||
with a given timestamp. Timestamp that is first searched for in the shot tables
|
||||
of the current prospect or, if not found, in the real-time data.
|
||||
|
||||
Returns a JSONB object.';
|
||||
|
||||
|
||||
--
|
||||
-- Name: geometry_from_tstamp(timestamp with time zone, numeric); Type: FUNCTION; Schema: public; Owner: postgres
|
||||
--
|
||||
|
||||
CREATE FUNCTION public.geometry_from_tstamp(ts timestamp with time zone, tolerance numeric, OUT geometry public.geometry, OUT delta numeric) RETURNS record
|
||||
LANGUAGE sql
|
||||
AS $$
|
||||
SELECT
|
||||
geometry,
|
||||
extract('epoch' FROM tstamp - ts ) AS delta
|
||||
FROM real_time_inputs
|
||||
WHERE
|
||||
geometry IS NOT NULL AND
|
||||
tstamp BETWEEN (ts - tolerance * interval '1 second') AND (ts + tolerance * interval '1 second')
|
||||
ORDER BY abs(extract('epoch' FROM tstamp - ts ))
|
||||
LIMIT 1;
|
||||
$$;
|
||||
|
||||
|
||||
ALTER FUNCTION public.geometry_from_tstamp(ts timestamp with time zone, tolerance numeric, OUT geometry public.geometry, OUT delta numeric) OWNER TO postgres;
|
||||
|
||||
--
|
||||
-- Name: FUNCTION geometry_from_tstamp(ts timestamp with time zone, tolerance numeric, OUT geometry public.geometry, OUT delta numeric); Type: COMMENT; Schema: public; Owner: postgres
|
||||
--
|
||||
|
||||
COMMENT ON FUNCTION public.geometry_from_tstamp(ts timestamp with time zone, tolerance numeric, OUT geometry public.geometry, OUT delta numeric) IS 'Get geometry from timestamp';
|
||||
|
||||
|
||||
--
-- Name: interpolate_geometry_from_tstamp(timestamp with time zone, numeric); Type: FUNCTION; Schema: public; Owner: postgres
--

CREATE FUNCTION public.interpolate_geometry_from_tstamp(ts timestamp with time zone, maxspan numeric) RETURNS public.geometry
LANGUAGE plpgsql
AS $$
DECLARE
ts0 timestamptz;
ts1 timestamptz;
geom0 geometry;
geom1 geometry;
span numeric;
fraction numeric;
BEGIN

SELECT tstamp, geometry
INTO ts0, geom0
FROM real_time_inputs
WHERE tstamp <= ts
ORDER BY tstamp DESC
LIMIT 1;

SELECT tstamp, geometry
INTO ts1, geom1
FROM real_time_inputs
WHERE tstamp >= ts
ORDER BY tstamp ASC
LIMIT 1;

IF geom0 IS NULL OR geom1 IS NULL THEN
RAISE NOTICE 'Interpolation failed (no straddling data)';
RETURN NULL;
END IF;

-- See if we got an exact match
IF ts0 = ts THEN
RETURN geom0;
ELSIF ts1 = ts THEN
RETURN geom1;
END IF;

span := extract('epoch' FROM ts1 - ts0);

IF span > maxspan THEN
RAISE NOTICE 'Interpolation timespan % outside maximum requested (%)', span, maxspan;
RETURN NULL;
END IF;

fraction := extract('epoch' FROM ts - ts0) / span;

IF fraction < 0 OR fraction > 1 THEN
RAISE NOTICE 'Requested timestamp % outside of interpolation span (fraction: %)', ts, fraction;
RETURN NULL;
END IF;

RETURN ST_LineInterpolatePoint(St_MakeLine(geom0, geom1), fraction);

END;
$$;


ALTER FUNCTION public.interpolate_geometry_from_tstamp(ts timestamp with time zone, maxspan numeric) OWNER TO postgres;

--
-- Name: FUNCTION interpolate_geometry_from_tstamp(ts timestamp with time zone, maxspan numeric); Type: COMMENT; Schema: public; Owner: postgres
--

COMMENT ON FUNCTION public.interpolate_geometry_from_tstamp(ts timestamp with time zone, maxspan numeric) IS 'Interpolate a position over a given maximum timespan (in seconds)
based on real-time inputs. Returns a POINT geometry.';

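-- Hypothetical usage (values are illustrative):
--
--   SELECT public.interpolate_geometry_from_tstamp('2021-06-01T12:00:00Z', 30);
--
-- Interpolates linearly between the two fixes straddling the timestamp,
-- provided they are no more than 30 seconds apart.
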
--
-- Name: notify(); Type: FUNCTION; Schema: public; Owner: postgres
--
@@ -182,23 +383,110 @@ $$;

ALTER FUNCTION public.notify() OWNER TO postgres;

--
-- Name: sequence_shot_from_tstamp(timestamp with time zone); Type: FUNCTION; Schema: public; Owner: postgres
--

CREATE FUNCTION public.sequence_shot_from_tstamp(ts timestamp with time zone, OUT sequence numeric, OUT point numeric, OUT delta numeric) RETURNS record
LANGUAGE sql
AS $$
SELECT * FROM public.sequence_shot_from_tstamp(ts, 3);
$$;


ALTER FUNCTION public.sequence_shot_from_tstamp(ts timestamp with time zone, OUT sequence numeric, OUT point numeric, OUT delta numeric) OWNER TO postgres;

--
-- Name: FUNCTION sequence_shot_from_tstamp(ts timestamp with time zone, OUT sequence numeric, OUT point numeric, OUT delta numeric); Type: COMMENT; Schema: public; Owner: postgres
--

COMMENT ON FUNCTION public.sequence_shot_from_tstamp(ts timestamp with time zone, OUT sequence numeric, OUT point numeric, OUT delta numeric) IS 'Get sequence and shotpoint from timestamp.

Overloaded form in which the tolerance value is implied and defaults to three seconds.';

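-- Hypothetical usage (timestamp is illustrative):
--
--   SELECT * FROM public.sequence_shot_from_tstamp('2021-06-01T12:00:00Z');
--
-- Equivalent to calling the two-argument form below with a tolerance of 3 s.
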
--
-- Name: sequence_shot_from_tstamp(timestamp with time zone, numeric); Type: FUNCTION; Schema: public; Owner: postgres
--

CREATE FUNCTION public.sequence_shot_from_tstamp(ts timestamp with time zone, tolerance numeric, OUT sequence numeric, OUT point numeric, OUT delta numeric) RETURNS record
LANGUAGE sql
AS $$
SELECT
(meta->>'_sequence')::numeric AS sequence,
(meta->>'_point')::numeric AS point,
extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts ) AS delta
FROM real_time_inputs
WHERE
meta ? '_sequence' AND
abs(extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts )) < tolerance
ORDER BY abs(extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts ))
LIMIT 1;
$$;


ALTER FUNCTION public.sequence_shot_from_tstamp(ts timestamp with time zone, tolerance numeric, OUT sequence numeric, OUT point numeric, OUT delta numeric) OWNER TO postgres;

--
-- Name: FUNCTION sequence_shot_from_tstamp(ts timestamp with time zone, tolerance numeric, OUT sequence numeric, OUT point numeric, OUT delta numeric); Type: COMMENT; Schema: public; Owner: postgres
--

COMMENT ON FUNCTION public.sequence_shot_from_tstamp(ts timestamp with time zone, tolerance numeric, OUT sequence numeric, OUT point numeric, OUT delta numeric) IS 'Get sequence and shotpoint from timestamp.

Given a timestamp this function returns the closest shot to it within the given tolerance value.

This uses the `real_time_inputs` table and it does not give an indication of which project the shotpoint belongs to. It is assumed that a single project is being acquired at a given time.';

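-- Hypothetical usage (values are illustrative):
--
--   SELECT sequence, point, delta
--   FROM public.sequence_shot_from_tstamp('2021-06-01T12:00:00Z', 10);
--
-- delta is the signed offset in seconds of the matched shot from the
-- requested timestamp; all-NULL output means no shot fell within tolerance.
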
--
-- Name: set_survey(text); Type: PROCEDURE; Schema: public; Owner: postgres
--

CREATE PROCEDURE public.set_survey(IN project_id text)
LANGUAGE sql
AS $$
SELECT set_config('search_path', (SELECT schema||',public' FROM public.projects WHERE pid = lower(project_id)), false);
$$;


ALTER PROCEDURE public.set_survey(IN project_id text) OWNER TO postgres;

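-- Hypothetical usage (the project id is illustrative):
--
--   CALL public.set_survey('nsr2021');
--
-- Places the matching project schema ahead of public in search_path for
-- the current session, so unqualified names resolve to that survey.
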
--
-- Name: update_timestamp(); Type: FUNCTION; Schema: public; Owner: postgres
--

CREATE FUNCTION public.update_timestamp() RETURNS trigger
LANGUAGE plpgsql
AS $$
BEGIN
IF NEW.updated_on IS NOT NULL THEN
NEW.updated_on := current_timestamp;
END IF;
RETURN NEW;
EXCEPTION
WHEN undefined_column THEN RETURN NEW;
END;
$$;


ALTER FUNCTION public.update_timestamp() OWNER TO postgres;

SET default_tablespace = '';

SET default_table_access_method = heap;

--
-- Name: info; Type: TABLE; Schema: public; Owner: postgres
--

CREATE TABLE public.info (
key text NOT NULL,
value jsonb
);


ALTER TABLE public.info OWNER TO postgres;

--
-- Name: projects; Type: TABLE; Schema: public; Owner: postgres
--
@@ -213,6 +501,46 @@ CREATE TABLE public.projects (

ALTER TABLE public.projects OWNER TO postgres;

--
-- Name: queue_items; Type: TABLE; Schema: public; Owner: postgres
--

CREATE TABLE public.queue_items (
item_id integer NOT NULL,
status public.queue_item_status DEFAULT 'queued'::public.queue_item_status NOT NULL,
payload jsonb NOT NULL,
results jsonb DEFAULT '{}'::jsonb NOT NULL,
created_on timestamp with time zone DEFAULT CURRENT_TIMESTAMP NOT NULL,
updated_on timestamp with time zone DEFAULT CURRENT_TIMESTAMP NOT NULL,
not_before timestamp with time zone DEFAULT '1970-01-01 00:00:00+00'::timestamp with time zone NOT NULL,
parent_id integer
);

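-- A minimal, hypothetical enqueue (the payload shape is illustrative only):
--
--   INSERT INTO public.queue_items (payload)
--   VALUES ('{"action": "submit", "sequence": 1042}'::jsonb);
--
-- item_id, status, results and the timestamps take their defaults;
-- not_before may be set in the future to delay processing of the item.
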
ALTER TABLE public.queue_items OWNER TO postgres;

--
-- Name: queue_items_item_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
--

CREATE SEQUENCE public.queue_items_item_id_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;


ALTER TABLE public.queue_items_item_id_seq OWNER TO postgres;

--
-- Name: queue_items_item_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
--

ALTER SEQUENCE public.queue_items_item_id_seq OWNED BY public.queue_items.item_id;


--
-- Name: real_time_inputs; Type: TABLE; Schema: public; Owner: postgres
--
@@ -226,6 +554,21 @@ CREATE TABLE public.real_time_inputs (

ALTER TABLE public.real_time_inputs OWNER TO postgres;

--
-- Name: queue_items item_id; Type: DEFAULT; Schema: public; Owner: postgres
--

ALTER TABLE ONLY public.queue_items ALTER COLUMN item_id SET DEFAULT nextval('public.queue_items_item_id_seq'::regclass);


--
-- Name: info info_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres
--

ALTER TABLE ONLY public.info
ADD CONSTRAINT info_pkey PRIMARY KEY (key);


--
-- Name: projects projects_name_key; Type: CONSTRAINT; Schema: public; Owner: postgres
--
@@ -250,6 +593,14 @@ ALTER TABLE ONLY public.projects
ADD CONSTRAINT projects_schema_key UNIQUE (schema);


--
-- Name: queue_items queue_items_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres
--

ALTER TABLE ONLY public.queue_items
ADD CONSTRAINT queue_items_pkey PRIMARY KEY (item_id);


--
-- Name: tstamp_idx; Type: INDEX; Schema: public; Owner: postgres
--
@@ -257,6 +608,13 @@ ALTER TABLE ONLY public.projects
CREATE INDEX tstamp_idx ON public.real_time_inputs USING btree (tstamp DESC);


--
-- Name: info info_tg; Type: TRIGGER; Schema: public; Owner: postgres
--

CREATE TRIGGER info_tg AFTER INSERT OR DELETE OR UPDATE ON public.info FOR EACH ROW EXECUTE FUNCTION public.notify('info');


--
-- Name: projects projects_tg; Type: TRIGGER; Schema: public; Owner: postgres
--
@@ -264,6 +622,20 @@ CREATE INDEX tstamp_idx ON public.real_time_inputs USING btree (tstamp DESC);
CREATE TRIGGER projects_tg AFTER INSERT OR DELETE OR UPDATE ON public.projects FOR EACH ROW EXECUTE FUNCTION public.notify('project');


--
-- Name: queue_items queue_items_tg0; Type: TRIGGER; Schema: public; Owner: postgres
--

CREATE TRIGGER queue_items_tg0 BEFORE INSERT OR UPDATE ON public.queue_items FOR EACH ROW EXECUTE FUNCTION public.update_timestamp();


--
-- Name: queue_items queue_items_tg1; Type: TRIGGER; Schema: public; Owner: postgres
--

CREATE TRIGGER queue_items_tg1 AFTER INSERT OR DELETE OR UPDATE ON public.queue_items FOR EACH ROW EXECUTE FUNCTION public.notify('queue_items');


--
-- Name: real_time_inputs real_time_inputs_tg; Type: TRIGGER; Schema: public; Owner: postgres
--
@@ -271,6 +643,14 @@ CREATE TRIGGER projects_tg AFTER INSERT OR DELETE OR UPDATE ON public.projects F
CREATE TRIGGER real_time_inputs_tg AFTER INSERT ON public.real_time_inputs FOR EACH ROW EXECUTE FUNCTION public.notify('realtime');


--
-- Name: queue_items queue_items_parent_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres
--

ALTER TABLE ONLY public.queue_items
ADD CONSTRAINT queue_items_parent_id_fkey FOREIGN KEY (parent_id) REFERENCES public.queue_items(item_id);


--
-- PostgreSQL database dump complete
--

5
etc/db/database-version.sql
Normal file
@@ -0,0 +1,5 @@
\connect dougal

INSERT INTO public.info VALUES ('version', '{"db_schema": "0.4.5"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.4.5"}' WHERE public.info.key = 'version';

File diff suppressed because it is too large
34
etc/db/upgrades/README.md
Normal file
@@ -0,0 +1,34 @@
# Database schema upgrades

When the database schema needs to be upgraded in order to provide new functionality, fix errors, etc., an upgrade script should be added to this directory.

The script can be SQL (preferred) or anything else (Bash, Python, …) in the event of complex upgrades.

The script itself should:

* document what the intended changes are;
* contain instructions on how to run it;
* make the user aware of any non-obvious side effects; and
* say if it is safe to run the script multiple times on the same schema / database.

## Naming

Script files should be named `upgrade-<index>-<commit-id-old>-<commit-id-new>-v<schema-version>.sql`, where:

* `<index>` is a sequential two-digit index. When it reaches 99, existing files will be renamed to a three-digit index (001-099) and new files will use three digits.
* `<commit-id-old>` is the ID of the Git commit that last introduced a schema change.
* `<commit-id-new>` is the ID of the first Git commit expecting the updated schema.
* `<schema-version>` is the version of the schema.

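For example, a hypothetical upgrade from commit `aaaa1111` to commit `bbbb2222` that bumps the schema to 0.5.0 would be named `upgrade-13-aaaa1111-bbbb2222-v0.5.0.sql` (the index, commit IDs and version here are illustrative only).
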
Note: the `<schema-version>` value should be updated with every change and it should be the same as reported by:

```sql
select value->>'db_schema' as db_schema from public.info where key = 'version';
```

If necessary, the expected schema version must also be updated in `package.json`.

## Running

Schema upgrades are always run manually.
22
etc/db/upgrades/upgrade01-78adb2be→7917eeeb.sql
Normal file
@@ -0,0 +1,22 @@
-- Upgrade the database from commit 78adb2be to 7917eeeb.
--
-- This upgrade affects the `public` schema only.
--
-- It creates a new table, `info`, for storing arbitrary JSON
-- data not belonging to a specific project. Currently used
-- for the equipment list, it could also serve to store user
-- details, configuration settings, system state, etc.
--
-- To apply, run as the dougal user:
--
-- psql < $THIS_FILE
--
-- NOTE: It will fail harmlessly if applied twice.


CREATE TABLE IF NOT EXISTS public.info (
key text NOT NULL primary key,
value jsonb
);

CREATE TRIGGER info_tg AFTER INSERT OR DELETE OR UPDATE ON public.info FOR EACH ROW EXECUTE FUNCTION public.notify('info');
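-- A hypothetical entry (the key and payload are illustrative):
--
--   INSERT INTO public.info VALUES ('equipment', '{"streamers": 10}'::jsonb);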
160
etc/db/upgrades/upgrade02-6e7ba82e→53f71f70.sql
Normal file
@@ -0,0 +1,160 @@
-- Upgrade the database from commit 6e7ba82e to 53f71f70.
--
-- NOTE: This upgrade must be applied to every schema in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This merges two changes to the database.
-- The first one (commit 5de64e6b) modifies the `event` view to return
-- the `meta` column of timed and sequence events.
-- The second one (commit 53f71f70) adds a primary key constraint to
-- events_seq_labels (there is already an equivalent constraint on
-- events_seq_timed).
--
-- To apply, run as the dougal user, for every schema in the database:
--
-- psql <<EOF
-- SET search_path TO survey_*,public;
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It will fail harmlessly if applied twice.


BEGIN;

DROP VIEW events_seq_timed CASCADE; -- Brings down events too
ALTER TABLE ONLY events_seq_labels
ADD CONSTRAINT events_seq_labels_pkey PRIMARY KEY (id, label);


CREATE OR REPLACE VIEW events_seq_timed AS
SELECT s.sequence,
s.point,
s.id,
s.remarks,
rs.line,
rs.objref,
rs.tstamp,
rs.hash,
s.meta,
rs.geometry
FROM (events_seq s
LEFT JOIN raw_shots rs USING (sequence, point));



CREATE OR REPLACE VIEW events AS
WITH qc AS (
SELECT rs.sequence,
rs.point,
ARRAY[jsonb_array_elements_text(q.labels)] AS labels
FROM raw_shots rs,
LATERAL jsonb_path_query(rs.meta, '$."qc".*."labels"'::jsonpath) q(labels)
)
SELECT 'sequence'::text AS type,
false AS virtual,
s.sequence,
s.point,
s.id,
s.remarks,
s.line,
s.objref,
s.tstamp,
s.hash,
s.meta,
(public.st_asgeojson(public.st_transform(s.geometry, 4326)))::jsonb AS geometry,
ARRAY( SELECT esl.label
FROM events_seq_labels esl
WHERE (esl.id = s.id)) AS labels
FROM events_seq_timed s
UNION
SELECT 'timed'::text AS type,
false AS virtual,
rs.sequence,
rs.point,
t.id,
t.remarks,
rs.line,
rs.objref,
t.tstamp,
rs.hash,
t.meta,
(t.meta -> 'geometry'::text) AS geometry,
ARRAY( SELECT etl.label
FROM events_timed_labels etl
WHERE (etl.id = t.id)) AS labels
FROM ((events_timed t
LEFT JOIN events_timed_seq ts USING (id))
LEFT JOIN raw_shots rs USING (sequence, point))
UNION
SELECT 'midnight shot'::text AS type,
true AS virtual,
v1.sequence,
v1.point,
((v1.sequence * 100000) + v1.point) AS id,
''::text AS remarks,
v1.line,
v1.objref,
v1.tstamp,
v1.hash,
'{}'::jsonb meta,
(public.st_asgeojson(public.st_transform(v1.geometry, 4326)))::jsonb AS geometry,
ARRAY[v1.label] AS labels
FROM events_midnight_shot v1
UNION
SELECT 'qc'::text AS type,
true AS virtual,
rs.sequence,
rs.point,
((10000000 + (rs.sequence * 100000)) + rs.point) AS id,
(q.remarks)::text AS remarks,
rs.line,
rs.objref,
rs.tstamp,
rs.hash,
'{}'::jsonb meta,
(public.st_asgeojson(public.st_transform(rs.geometry, 4326)))::jsonb AS geometry,
('{QC}'::text[] || qc.labels) AS labels
FROM (raw_shots rs
LEFT JOIN qc USING (sequence, point)),
LATERAL jsonb_path_query(rs.meta, '$."qc".*."results"'::jsonpath) q(remarks)
WHERE (rs.meta ? 'qc'::text);

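-- A hypothetical query against the rebuilt view (the label is illustrative):
--
--   SELECT type, virtual, sequence, point, labels
--   FROM events
--   WHERE 'QC' = ANY(labels);
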
CREATE OR REPLACE VIEW final_lines_summary AS
WITH summary AS (
SELECT DISTINCT fs.sequence,
first_value(fs.point) OVER w AS fsp,
last_value(fs.point) OVER w AS lsp,
first_value(fs.tstamp) OVER w AS ts0,
last_value(fs.tstamp) OVER w AS ts1,
count(fs.point) OVER w AS num_points,
public.st_distance(first_value(fs.geometry) OVER w, last_value(fs.geometry) OVER w) AS length,
((public.st_azimuth(first_value(fs.geometry) OVER w, last_value(fs.geometry) OVER w) * (180)::double precision) / pi()) AS azimuth
FROM final_shots fs
WINDOW w AS (PARTITION BY fs.sequence ORDER BY fs.tstamp ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
)
SELECT fl.sequence,
fl.line,
s.fsp,
s.lsp,
s.ts0,
s.ts1,
(s.ts1 - s.ts0) AS duration,
s.num_points,
(( SELECT count(*) AS count
FROM preplot_points
WHERE ((preplot_points.line = fl.line) AND (((preplot_points.point >= s.fsp) AND (preplot_points.point <= s.lsp)) OR ((preplot_points.point >= s.lsp) AND (preplot_points.point <= s.fsp))))) - s.num_points) AS missing_shots,
s.length,
s.azimuth,
fl.remarks,
fl.meta
FROM (summary s
JOIN final_lines fl USING (sequence));

--
--NOTE Run `COMMIT;` now if all went well
--

171
etc/db/upgrades/upgrade03-53f71f70→4d977848.sql
Normal file
@@ -0,0 +1,171 @@
-- Upgrade the database from commit 53f71f70 to 4d977848.
--
-- NOTE: This upgrade must be applied to every schema in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This adds:
--
-- * label_in_sequence (_sequence integer, _label text):
-- Returns events containing the specified label.
--
-- * handle_final_line_events (_seq integer, _label text, _column text):
-- - If _label does not exist in the events for sequence _seq:
-- it adds a new _label label at the shotpoint obtained from
-- final_lines_summary[_column].
-- - If _label does exist (and hasn't been auto-added by this function
-- in a previous run), it will add information about it to the final
-- line's metadata.
--
-- * final_line_post_import (_seq integer):
-- Calls handle_final_line_events() on the given sequence to check
-- for FSP, FGSP, LGSP and LSP labels.
--
-- * events_seq_labels_single ():
-- Trigger function to ensure that labels that have the attribute
-- `model.multiple` set to `false` occur at most once per
-- sequence. If a new instance is added to a sequence, the previous
-- instance is deleted.
--
-- * Trigger on events_seq_labels that calls events_seq_labels_single().
--
-- * Trigger on events_timed_labels that calls events_seq_labels_single().
--
-- To apply, run as the dougal user, for every schema in the database:
--
-- psql <<EOF
-- SET search_path TO survey_*,public;
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It will fail harmlessly if applied twice.


BEGIN;

CREATE OR REPLACE FUNCTION label_in_sequence (_sequence integer, _label text)
RETURNS events
LANGUAGE sql
AS $$
SELECT * FROM events WHERE sequence = _sequence AND _label = ANY(labels);
$$;

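-- Hypothetical usage (sequence and label are illustrative):
--
--   SELECT * FROM label_in_sequence(1042, 'FSP');
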
CREATE OR REPLACE PROCEDURE handle_final_line_events (_seq integer, _label text, _column text)
LANGUAGE plpgsql
AS $$

DECLARE
_line final_lines_summary%ROWTYPE;
_column_value integer;
_tg_name text := 'final_line';
_event events%ROWTYPE;
event_id integer;
BEGIN

SELECT * INTO _line FROM final_lines_summary WHERE sequence = _seq;
_event := label_in_sequence(_seq, _label);
_column_value := row_to_json(_line)->>_column;

--RAISE NOTICE '% is %', _label, _event;
--RAISE NOTICE 'Line is %', _line;
--RAISE NOTICE '% is % (%)', _column, _column_value, _label;

IF _event IS NULL THEN
--RAISE NOTICE 'We will populate the event log from the sequence data';

SELECT id INTO event_id FROM events_seq WHERE sequence = _seq AND point = _column_value ORDER BY id LIMIT 1;
IF event_id IS NULL THEN
--RAISE NOTICE '… but there is no existing event so we create a new one for sequence % and point %', _line.sequence, _column_value;
INSERT INTO events_seq (sequence, point, remarks)
VALUES (_line.sequence, _column_value, format('%s %s', _label, (SELECT meta->>'lineName' FROM final_lines WHERE sequence = _seq)))
RETURNING id INTO event_id;
--RAISE NOTICE 'Created event_id %', event_id;
END IF;

--RAISE NOTICE 'Remove any other auto-inserted % labels in sequence %', _label, _seq;
DELETE FROM events_seq_labels
WHERE label = _label AND id = (SELECT id FROM events_seq WHERE sequence = _seq AND meta->'auto' ? _label);

--RAISE NOTICE 'We now add a label to the event (id, label) = (%, %)', event_id, _label;
INSERT INTO events_seq_labels (id, label) VALUES (event_id, _label) ON CONFLICT ON CONSTRAINT events_seq_labels_pkey DO NOTHING;

--RAISE NOTICE 'And also clear the %: % flag from meta.auto for any existing events for sequence %', _label, _tg_name, _seq;
UPDATE events_seq
SET meta = meta #- ARRAY['auto', _label]
WHERE meta->'auto' ? _label AND sequence = _seq AND id <> event_id;

--RAISE NOTICE 'Finally, flag the event as having had label % auto-created by %', _label, _tg_name;
UPDATE events_seq
SET meta = jsonb_set(jsonb_set(meta, '{auto}', COALESCE(meta->'auto', '{}')), ARRAY['auto', _label], to_jsonb(_tg_name))
WHERE id = event_id;

ELSE
--RAISE NOTICE 'We may populate the sequence meta from the event log';
--RAISE NOTICE 'Unless the event log was populated by us previously';
--RAISE NOTICE 'Populated by us previously? %', _event.meta->'auto'->>_label = _tg_name;

IF _event.meta->'auto'->>_label IS DISTINCT FROM _tg_name THEN
--RAISE NOTICE 'Adding % found in events log to final_line meta', _label;
UPDATE final_lines
SET meta = jsonb_set(meta, ARRAY[_label], to_jsonb(_event.point))
WHERE sequence = _seq;

--RAISE NOTICE 'Clearing the %: % flag from meta.auto for any existing events in sequence %', _label, _tg_name, _seq;
UPDATE events_seq
SET meta = meta #- ARRAY['auto', _label]
WHERE sequence = _seq AND meta->'auto'->>_label = _tg_name;

END IF;

END IF;
END;
$$;

CREATE OR REPLACE PROCEDURE final_line_post_import (_seq integer)
LANGUAGE plpgsql
AS $$
BEGIN

CALL handle_final_line_events(_seq, 'FSP', 'fsp');
CALL handle_final_line_events(_seq, 'FGSP', 'fsp');
CALL handle_final_line_events(_seq, 'LGSP', 'lsp');
CALL handle_final_line_events(_seq, 'LSP', 'lsp');

END;
$$;


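-- Hypothetical usage, e.g. after importing final data for a sequence
-- (the sequence number is illustrative):
--
--   CALL final_line_post_import(1042);
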
CREATE OR REPLACE FUNCTION events_seq_labels_single ()
RETURNS trigger
LANGUAGE plpgsql
AS $$
DECLARE _sequence integer;
BEGIN
IF EXISTS(SELECT 1 FROM labels WHERE name = NEW.label AND (data->'model'->'multiple')::boolean IS FALSE) THEN
SELECT sequence INTO _sequence FROM events WHERE id = NEW.id;
DELETE
FROM events_seq_labels
WHERE
id <> NEW.id
AND label = NEW.label
AND id IN (SELECT id FROM events_seq WHERE sequence = _sequence);

DELETE
FROM events_timed_labels
WHERE
id <> NEW.id
AND label = NEW.label
AND id IN (SELECT id FROM events_timed_seq WHERE sequence = _sequence);
END IF;
RETURN NULL;
END;
$$;

CREATE TRIGGER events_seq_labels_single_tg AFTER INSERT OR UPDATE ON events_seq_labels FOR EACH ROW EXECUTE FUNCTION events_seq_labels_single();
CREATE TRIGGER events_seq_labels_single_tg AFTER INSERT OR UPDATE ON events_timed_labels FOR EACH ROW EXECUTE FUNCTION events_seq_labels_single();

--
--NOTE Run `COMMIT;` now if all went well
--

94
etc/db/upgrades/upgrade04-4d977848→3d70a460.sql
Normal file
@@ -0,0 +1,94 @@
-- Upgrade the database from commit 4d977848 to 3d70a460.
--
-- NOTE: This upgrade must be applied to every schema in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This adds the `meta` column to the output of the following views:
--
-- * raw_lines_summary; and
-- * sequences_summary
--
-- To apply, run as the dougal user, for every schema in the database:
--
-- psql <<EOF
-- SET search_path TO survey_*,public;
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.


BEGIN;

CREATE OR REPLACE VIEW raw_lines_summary AS
WITH summary AS (
SELECT DISTINCT rs.sequence,
first_value(rs.point) OVER w AS fsp,
last_value(rs.point) OVER w AS lsp,
first_value(rs.tstamp) OVER w AS ts0,
last_value(rs.tstamp) OVER w AS ts1,
count(rs.point) OVER w AS num_points,
count(pp.point) OVER w AS num_preplots,
public.st_distance(first_value(rs.geometry) OVER w, last_value(rs.geometry) OVER w) AS length,
((public.st_azimuth(first_value(rs.geometry) OVER w, last_value(rs.geometry) OVER w) * (180)::double precision) / pi()) AS azimuth
FROM (raw_shots rs
LEFT JOIN preplot_points pp USING (line, point))
WINDOW w AS (PARTITION BY rs.sequence ORDER BY rs.tstamp ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
)
SELECT rl.sequence,
rl.line,
s.fsp,
s.lsp,
s.ts0,
s.ts1,
(s.ts1 - s.ts0) AS duration,
s.num_points,
s.num_preplots,
(( SELECT count(*) AS count
FROM preplot_points
WHERE ((preplot_points.line = rl.line) AND (((preplot_points.point >= s.fsp) AND (preplot_points.point <= s.lsp)) OR ((preplot_points.point >= s.lsp) AND (preplot_points.point <= s.fsp))))) - s.num_preplots) AS missing_shots,
s.length,
s.azimuth,
rl.remarks,
rl.ntbp,
rl.meta
FROM (summary s
JOIN raw_lines rl USING (sequence));

DROP VIEW sequences_summary;
CREATE OR REPLACE VIEW sequences_summary AS
SELECT rls.sequence,
rls.line,
rls.fsp,
rls.lsp,
fls.fsp AS fsp_final,
fls.lsp AS lsp_final,
rls.ts0,
rls.ts1,
fls.ts0 AS ts0_final,
fls.ts1 AS ts1_final,
rls.duration,
fls.duration AS duration_final,
rls.num_preplots,
COALESCE(fls.num_points, rls.num_points) AS num_points,
COALESCE(fls.missing_shots, rls.missing_shots) AS missing_shots,
COALESCE(fls.length, rls.length) AS length,
COALESCE(fls.azimuth, rls.azimuth) AS azimuth,
rls.remarks,
fls.remarks AS remarks_final,
rls.meta,
fls.meta AS meta_final,
CASE
WHEN (rls.ntbp IS TRUE) THEN 'ntbp'::text
WHEN (fls.sequence IS NULL) THEN 'raw'::text
ELSE 'final'::text
END AS status
FROM (raw_lines_summary rls
LEFT JOIN final_lines_summary fls USING (sequence));

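-- A hypothetical status check against the rebuilt view:
--
--   SELECT sequence, line, status FROM sequences_summary ORDER BY sequence;
--
-- status is 'ntbp', 'raw' or 'final' as derived in the CASE above.
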
--
--NOTE Run `COMMIT;` now if all went well
--

33
etc/db/upgrades/upgrade05-3d70a460→0983abac.sql
Normal file
@@ -0,0 +1,33 @@
-- Upgrade the database from commit 3d70a460 to 0983abac.
--
-- NOTE: This upgrade must be applied to every schema in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This:
--
-- * makes the primary key on planned_lines deferrable; and
-- * changes the planned_lines trigger from statement to row.
--
-- To apply, run as the dougal user, for every schema in the database:
--
-- psql <<EOF
-- SET search_path TO survey_*,public;
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.


BEGIN;

ALTER TABLE planned_lines DROP CONSTRAINT planned_lines_pkey;
ALTER TABLE planned_lines ADD CONSTRAINT planned_lines_pkey PRIMARY KEY (sequence) DEFERRABLE;

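-- With the key deferrable, renumbering no longer trips over transient
-- duplicates. A hypothetical sketch:
--
--   BEGIN;
--   SET CONSTRAINTS planned_lines_pkey DEFERRED;
--   UPDATE planned_lines SET sequence = sequence + 1;
--   COMMIT;
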
DROP TRIGGER planned_lines_tg ON planned_lines;
CREATE TRIGGER planned_lines_tg AFTER INSERT OR DELETE OR UPDATE ON planned_lines FOR EACH ROW EXECUTE FUNCTION public.notify('planned_lines');

--
--NOTE Run `COMMIT;` now if all went well
--
207
etc/db/upgrades/upgrade06-0983abac→81d9ea19.sql
Normal file
@@ -0,0 +1,207 @@
-- Upgrade the database from commit 0983abac to 81d9ea19.
--
-- NOTE: This upgrade must be applied to every schema in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This defines a new procedure adjust_planner() which resolves some
-- conflicts between shot sequences and the planner, such as removing
-- sequences that have been shot, renumbering, or adjusting the planned
-- times.
--
-- It is meant to be called at regular intervals by an external process,
-- such as the runner (software/bin/runner.sh).
--
-- A trigger for changes to the schema's `info` table is also added.
--
-- To apply, run as the dougal user, for every schema in the database:
--
-- psql <<EOF
-- SET search_path TO survey_*,public;
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.


BEGIN;

CREATE OR REPLACE PROCEDURE adjust_planner ()
LANGUAGE plpgsql
AS $$
DECLARE
_planner_config jsonb;
_planned_line planned_lines%ROWTYPE;
_lag interval;
_last_sequence sequences_summary%ROWTYPE;
_deltatime interval;
_shotinterval interval;
_tstamp timestamptz;
_incr integer;
BEGIN

SET CONSTRAINTS planned_lines_pkey DEFERRED;

SELECT data->'planner'
INTO _planner_config
FROM file_data
WHERE data ? 'planner';

SELECT *
INTO _last_sequence
FROM sequences_summary
ORDER BY sequence DESC
LIMIT 1;

SELECT *
INTO _planned_line
FROM planned_lines
WHERE sequence = _last_sequence.sequence AND line = _last_sequence.line;

SELECT
COALESCE(
((lead(ts0) OVER (ORDER BY sequence)) - ts1),
make_interval(mins => (_planner_config->>'defaultLineChangeDuration')::integer)
)
INTO _lag
FROM planned_lines
WHERE sequence = _last_sequence.sequence AND line = _last_sequence.line;

_incr = sign(_last_sequence.lsp - _last_sequence.fsp);

RAISE NOTICE '_planner_config: %', _planner_config;
RAISE NOTICE '_last_sequence: %', _last_sequence;
RAISE NOTICE '_planned_line: %', _planned_line;
RAISE NOTICE '_incr: %', _incr;

-- Does the latest sequence match a planned sequence?
IF _planned_line IS NULL THEN -- No it doesn't
RAISE NOTICE 'Latest sequence shot does not match a planned sequence';
SELECT * INTO _planned_line FROM planned_lines ORDER BY sequence ASC LIMIT 1;
RAISE NOTICE '_planned_line: %', _planned_line;

IF _planned_line.sequence <= _last_sequence.sequence THEN
RAISE NOTICE 'Renumbering the planned sequences starting from %', _planned_line.sequence + 1;
-- Renumber the planned sequences starting from last shot sequence number + 1
UPDATE planned_lines
SET sequence = sequence + _last_sequence.sequence - _planned_line.sequence + 1;
END IF;

-- The correction to make to the first planned line's ts0 will be based on either the last
-- sequence's EOL + default line change time or the current time, whichever is later.
_deltatime := GREATEST(COALESCE(_last_sequence.ts1_final, _last_sequence.ts1) + make_interval(mins => (_planner_config->>'defaultLineChangeDuration')::integer), current_timestamp) - _planned_line.ts0;

-- Is the first planned line's start time in the past? (±5 mins)
IF _planned_line.ts0 < (current_timestamp - make_interval(mins => 5)) THEN
RAISE NOTICE 'First planned line is in the past. Adjusting times by %', _deltatime;
-- Adjust the start / end time of the planned lines by assuming that we are at
-- `defaultLineChangeDuration` minutes away from SOL of the first planned line.
UPDATE planned_lines
SET
ts0 = ts0 + _deltatime,
ts1 = ts1 + _deltatime;
END IF;

ELSE -- Yes it does
RAISE NOTICE 'Latest sequence does match a planned sequence: %, %', _planned_line.sequence, _planned_line.line;

-- Is it online?
IF EXISTS(SELECT 1 FROM raw_lines_files WHERE sequence = _last_sequence.sequence AND hash = '*online*') THEN
-- Yes it is
RAISE NOTICE 'Sequence % is online', _last_sequence.sequence;

-- Let us get the SOL from the events log if we can
RAISE NOTICE 'Trying to set fsp, ts0 from events log FSP, FGSP';
WITH e AS (
SELECT * FROM events
WHERE
sequence = _last_sequence.sequence
AND ('FSP' = ANY(labels) OR 'FGSP' = ANY(labels))
ORDER BY tstamp LIMIT 1
)
UPDATE planned_lines
SET
fsp = COALESCE(e.point, fsp),
ts0 = COALESCE(e.tstamp, ts0)
FROM e
WHERE planned_lines.sequence = _last_sequence.sequence;

-- Shot interval
_shotinterval := (_last_sequence.ts1 - _last_sequence.ts0) / abs(_last_sequence.lsp - _last_sequence.fsp);

RAISE NOTICE 'Estimating EOL from current shot interval: %', _shotinterval;

SELECT (abs(lsp-fsp) * _shotinterval + ts0) - ts1
INTO _deltatime
FROM planned_lines
WHERE sequence = _last_sequence.sequence;

---- Set ts1 for the current sequence
--UPDATE planned_lines
--SET
--ts1 = (abs(lsp-fsp) * _shotinterval) + ts0
--WHERE sequence = _last_sequence.sequence;

RAISE NOTICE 'Adjustment is %', _deltatime;

IF abs(EXTRACT(EPOCH FROM _deltatime)) < 8 THEN
RAISE NOTICE 'Adjustment too small (< 8 s), so not applying it';
RETURN;
END IF;

-- Adjust ts1 for the current sequence
UPDATE planned_lines
SET ts1 = ts1 + _deltatime
WHERE sequence = _last_sequence.sequence;

-- Now shift all sequences after
UPDATE planned_lines
SET ts0 = ts0 + _deltatime, ts1 = ts1 + _deltatime
WHERE sequence > _last_sequence.sequence;

RAISE NOTICE 'Deleting planned sequences before %', _planned_line.sequence;
-- Remove all previous planner entries.
DELETE
FROM planned_lines
WHERE sequence < _last_sequence.sequence;

ELSE
-- No it isn't
RAISE NOTICE 'Sequence % is offline', _last_sequence.sequence;

-- We were supposed to finish at _planned_line.ts1 but we finished at:
_tstamp := GREATEST(COALESCE(_last_sequence.ts1_final, _last_sequence.ts1), current_timestamp);
-- WARNING Next line is for testing only
--_tstamp := COALESCE(_last_sequence.ts1_final, _last_sequence.ts1);
-- So we need to adjust timestamps by:
_deltatime := _tstamp - _planned_line.ts1;

RAISE NOTICE 'Planned end: %, actual end: % (%, %)', _planned_line.ts1, _tstamp, _planned_line.sequence, _last_sequence.sequence;
RAISE NOTICE 'Shifting times by % for sequences > %', _deltatime, _planned_line.sequence;
-- NOTE: This won't work if sequences are not, err… sequential.
-- NOTE: This has been known to happen in 2020.
UPDATE planned_lines
SET
ts0 = ts0 + _deltatime,
ts1 = ts1 + _deltatime
WHERE sequence > _planned_line.sequence;

RAISE NOTICE 'Deleting planned sequences up to %', _planned_line.sequence;
-- Remove all previous planner entries.
DELETE
FROM planned_lines
WHERE sequence <= _last_sequence.sequence;

END IF;

END IF;
END;
$$;

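-- Hypothetical invocation, e.g. from the runner at regular intervals,
-- with a survey schema in the search path:
--
--   CALL adjust_planner();
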
DROP TRIGGER IF EXISTS info_tg ON info;
CREATE TRIGGER info_tg AFTER INSERT OR DELETE OR UPDATE ON info FOR EACH ROW EXECUTE FUNCTION public.notify('info');

--
--NOTE Run `COMMIT;` now if all went well
--
91
etc/db/upgrades/upgrade07-81d9ea19→0a10c897.sql
Normal file
@@ -0,0 +1,91 @@
-- Upgrade the database from commit 81d9ea19 to 0a10c897.
--
-- NOTE: This upgrade must be applied to every schema in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This defines a new function ij_error(line, point, geometry) which
-- returns the crossline and inline distance (in metres) between the
-- geometry (which must be a point) and the preplot corresponding to
-- line / point.
--
-- To apply, run as the dougal user, for every schema in the database:
--
-- psql <<EOF
-- SET search_path TO survey_*,public;
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.


BEGIN;


-- Return the crossline, inline error of `geom` with respect to `line` and `point`
-- in the project's binning grid.

CREATE OR REPLACE FUNCTION ij_error(line double precision, point double precision, geom public.geometry)
RETURNS public.geometry(Point, 0)
LANGUAGE plpgsql STABLE LEAKPROOF
AS $$
DECLARE
bp jsonb := binning_parameters();
ij public.geometry := to_binning_grid(geom, bp);

theta numeric := (bp->>'theta')::numeric * pi() / 180;
I_inc numeric DEFAULT 1;
J_inc numeric DEFAULT 1;
I_width numeric := (bp->>'I_width')::numeric;
J_width numeric := (bp->>'J_width')::numeric;

a numeric := (I_inc/I_width) * cos(theta);
b numeric := (I_inc/I_width) * -sin(theta);
c numeric := (J_inc/J_width) * sin(theta);
d numeric := (J_inc/J_width) * cos(theta);
xoff numeric := (bp->'origin'->>'I')::numeric;
yoff numeric := (bp->'origin'->>'J')::numeric;
E0 numeric := (bp->'origin'->>'easting')::numeric;
N0 numeric := (bp->'origin'->>'northing')::numeric;

error_i double precision;
error_j double precision;
BEGIN
error_i := (public.st_x(ij) - line) * I_width;
error_j := (public.st_y(ij) - point) * J_width;

RETURN public.ST_MakePoint(error_i, error_j);
END
$$;

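-- Hypothetical usage (the sequence and point values are illustrative):
--
--   SELECT public.st_x(q.err) AS crossline_m, public.st_y(q.err) AS inline_m
--   FROM (SELECT ij_error(rs.line, rs.point, rs.geometry) AS err
--         FROM raw_shots rs WHERE rs.sequence = 1042 AND rs.point = 2042) q;
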
-- Return the list of points and metadata for all sequences.
-- Only points which have a corresponding preplot are returned.
-- If available, final positions are returned as well, if not they
-- are NULL.
-- Likewise, crossline / inline errors are also returned as a PostGIS
-- 2D point both for raw and final data.

CREATE OR REPLACE VIEW sequences_detail AS
SELECT
rl.sequence, rl.line AS sailline,
rs.line, rs.point,
rs.tstamp,
rs.objref objRefRaw, fs.objref objRefFinal,
ST_Transform(pp.geometry, 4326) geometryPreplot,
ST_Transform(rs.geometry, 4326) geometryRaw,
ST_Transform(fs.geometry, 4326) geometryFinal,
ij_error(rs.line, rs.point, rs.geometry) errorRaw,
ij_error(rs.line, rs.point, fs.geometry) errorFinal,
json_build_object('preplot', pp.meta, 'raw', rs.meta, 'final', fs.meta) meta
FROM
raw_lines rl
INNER JOIN raw_shots rs USING (sequence)
INNER JOIN preplot_points pp ON rs.line = pp.line AND rs.point = pp.point
LEFT JOIN final_shots fs ON rl.sequence = fs.sequence AND rs.point = fs.point;


--
--NOTE Run `COMMIT;` now if all went well
--
75
etc/db/upgrades/upgrade08-81d9ea19→74b3de5c.sql
Normal file
@@ -0,0 +1,75 @@
-- Upgrade the database from commit 81d9ea19 to 74b3de5c.
--
-- This upgrade affects the `public` schema only.
--
-- It creates a new table, `queue_items`, for storing
-- requests and responses related to inter-API communication.
-- At the moment this means Equinor's ASAQC API, but it
-- should be applicable to others as well if the need
-- arises.
--
-- As well as the table, it adds:
--
-- * `queue_item_status`, an ENUM type.
-- * `update_timestamp`, a trigger function.
-- * Two triggers on `queue_items`.
--
-- To apply, run as the dougal user:
--
-- psql < $THIS_FILE
--
-- NOTE: It will fail harmlessly if applied twice.


-- Queues are global, not per project,
-- so they go in the `public` schema.


CREATE TYPE queue_item_status
AS ENUM (
'queued',
'cancelled',
'failed',
'sent'
);

CREATE TABLE IF NOT EXISTS queue_items (
item_id serial NOT NULL PRIMARY KEY,
-- One day we may want multiple queues, in that case we will
-- have a queue_id and a relation of queue definitions.
-- But not right now.
-- queue_id integer NOT NULL REFERENCES queues (queue_id),
status queue_item_status NOT NULL DEFAULT 'queued',
payload jsonb NOT NULL,
results jsonb NOT NULL DEFAULT '{}'::jsonb,
created_on timestamptz NOT NULL DEFAULT current_timestamp,
updated_on timestamptz NOT NULL DEFAULT current_timestamp,
not_before timestamptz NOT NULL DEFAULT '1970-01-01T00:00:00Z',
parent_id integer NULL REFERENCES queue_items (item_id)
);

-- Sets `updated_on` to current_timestamp unless an explicit
-- timestamp is part of the update.
--
-- This function can be reused with any table that has (or could have)
-- an `updated_on` column of type timestamptz.
CREATE OR REPLACE FUNCTION update_timestamp () RETURNS trigger AS
$$
BEGIN
IF NEW.updated_on IS NOT NULL THEN
NEW.updated_on := current_timestamp;
END IF;
RETURN NEW;
EXCEPTION
WHEN undefined_column THEN RETURN NEW;
END;
$$
LANGUAGE plpgsql;

CREATE TRIGGER queue_items_tg0
BEFORE INSERT OR UPDATE ON public.queue_items
FOR EACH ROW EXECUTE FUNCTION public.update_timestamp();

CREATE TRIGGER queue_items_tg1
AFTER INSERT OR DELETE OR UPDATE ON public.queue_items
FOR EACH ROW EXECUTE FUNCTION public.notify('queue_items');
24
etc/db/upgrades/upgrade09-74b3de5c→83be83e4-v0.1.0.sql
Normal file
@@ -0,0 +1,24 @@
-- Upgrade the database from commit 74b3de5c to commit 83be83e4.
--
-- NOTE: This upgrade only affects the `public` schema.
--
-- This inserts a database schema version into the database.
-- Note that we are not otherwise changing the schema, so older
-- server code will continue to run against this version.
--
-- ATTENTION!
--
-- This value should be incremented every time that the database
-- schema changes (either `public` or any of the survey schemas)
-- and is used by the server at start-up to detect if it is
-- running against a compatible schema version.
--
-- To apply, run as the dougal user:
--
-- psql < $THIS_FILE
--
-- NOTE: It can be applied multiple times without ill effect.

INSERT INTO public.info VALUES ('version', '{"db_schema": "0.1.0"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.1.0"}' WHERE public.info.key = 'version';
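-- Because the jsonb || operator merges at the top level, any other keys
-- already stored under 'version' are preserved. Reading it back
-- (illustrative):
--
--   SELECT value->>'db_schema' FROM public.info WHERE key = 'version';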
84
etc/db/upgrades/upgrade10-83be83e4→53ed096e-v0.2.0.sql
Normal file
@@ -0,0 +1,84 @@
-- Upgrade the database from commit 83be83e4 to 53ed096e.
--
-- New schema version: 0.2.0
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This migrates the file hashes to address issue #173.
-- The new hashes use size, modification time, creation time and the
-- first half of the MD5 hex digest of the file's absolute path.
--
-- It's a minor (rather than patch) version number increment because
-- changes to `bin/datastore.py` mean that the data is no longer
-- compatible with the hashing function.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can take a while if run on a large database.
-- NOTE: It can be applied multiple times without ill effect.


BEGIN;

CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE migrate_hashes (schema_name text) AS $$
BEGIN
RAISE NOTICE 'Migrating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
EXECUTE format('UPDATE %I.files SET hash = array_to_string(array_append(trim_array(string_to_array(hash, '':''), 1), left(md5(path), 16)), '':'')', schema_name);
EXECUTE 'SET search_path TO public'; -- Back to the default search path for good measure
END;
$$ LANGUAGE plpgsql;

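-- Illustrative effect on a single, hypothetical hash value:
--
--   before: '123456:1610000000:1600000000:<old tail>'
--   after:  '123456:1610000000:1600000000:' || left(md5(path), 16)
--
-- i.e. only the last component is replaced; size and times are kept.
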
CREATE OR REPLACE PROCEDURE upgrade_10 () AS $$
DECLARE
row RECORD;
BEGIN
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL migrate_hashes(row.schema_name);
END LOOP;
END;
$$ LANGUAGE plpgsql;

CALL upgrade_10();

CALL show_notice('Cleaning up');
DROP PROCEDURE migrate_hashes (schema_name text);
DROP PROCEDURE upgrade_10 ();

CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.2.0"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.2.0"}' WHERE public.info.key = 'version';


CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
189
etc/db/upgrades/upgrade11-v0.2.1-tstamp-functions.sql
Normal file
@@ -0,0 +1,189 @@
-- Add functions to retrieve sequence/shotpoint from timestamps and vice-versa
--
-- New schema version: 0.2.1
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects the public schema.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- Two new functions are defined:
--
-- sequence_shot_from_tstamp(tstamp, [tolerance]) → sequence, point, delta
--
-- Returns a sequence + shotpoint if one falls within `tolerance` seconds
-- of `tstamp`. The tolerance may be omitted in which case it defaults to
-- three seconds. If multiple values match, it returns the closest in time.
--
-- tstamp_from_sequence_shot(sequence, point) → tstamp
--
-- Returns a timestamp given a sequence and point number.
--
-- NOTE: This last function must be called from a search path including a
-- project schema, as it accesses the raw_shots table.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can take a while if run on a large database.
-- NOTE: It can be applied multiple times without ill effect.
-- NOTE: This will lock the database while the transaction is active.
--
-- WARNING: Applying this upgrade drops the old tables. Ensure that you
-- have migrated the data first.
--
-- NOTE: This is a patch version change so it does not require a
-- backend restart.

BEGIN;

CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $$
BEGIN

RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);

CREATE OR REPLACE FUNCTION tstamp_from_sequence_shot(
IN s numeric,
IN p numeric,
OUT "ts" timestamptz)
AS $inner$
SELECT tstamp FROM raw_shots WHERE sequence = s AND point = p LIMIT 1;
$inner$ LANGUAGE SQL;


COMMENT ON FUNCTION tstamp_from_sequence_shot(numeric, numeric)
IS 'Get the timestamp of an existing shotpoint.';


CREATE OR REPLACE FUNCTION tstamp_interpolate(s numeric, p numeric) RETURNS timestamptz
AS $inner$
DECLARE
ts0 timestamptz;
ts1 timestamptz;
pt0 numeric;
pt1 numeric;
BEGIN

SELECT tstamp, point
INTO ts0, pt0
FROM raw_shots
WHERE sequence = s AND point < p
ORDER BY point DESC LIMIT 1;


SELECT tstamp, point
INTO ts1, pt1
FROM raw_shots
WHERE sequence = s AND point > p
ORDER BY point ASC LIMIT 1;

RETURN (ts1-ts0)/abs(pt1-pt0)*abs(p-pt0)+ts0;

END;
$inner$ LANGUAGE PLPGSQL;

COMMENT ON FUNCTION tstamp_interpolate(numeric, numeric)
IS 'Interpolate a timestamp given sequence and point values.

It will try to find the points immediately before and after in the sequence and interpolate into the gap, which may consist of multiple missed shots.

If called on an existing shotpoint it will return an interpolated timestamp as if the shotpoint did not exist, as opposed to returning its actual timestamp.

Returns NULL if it is not possible to interpolate.';

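-- Hypothetical example: if points 2040 and 2044 exist in sequence 1042
-- but 2042 was missed,
--
--   SELECT tstamp_interpolate(1042, 2042);
--
-- returns a timestamp proportionally between those of 2040 and 2044.
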
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_database () AS $$
DECLARE
row RECORD;
BEGIN


CREATE OR REPLACE FUNCTION public.sequence_shot_from_tstamp(
IN ts timestamptz,
IN tolerance numeric,
OUT "sequence" numeric,
OUT "point" numeric,
OUT "delta" numeric)
AS $inner$
SELECT
(meta->>'_sequence')::numeric AS sequence,
(meta->>'_point')::numeric AS point,
extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts ) AS delta
FROM real_time_inputs
WHERE
meta ? '_sequence' AND
abs(extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts )) < tolerance
ORDER BY abs(extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts ))
LIMIT 1;
$inner$ LANGUAGE SQL;


COMMENT ON FUNCTION public.sequence_shot_from_tstamp(timestamptz, numeric)
IS 'Get sequence and shotpoint from timestamp.

Given a timestamp this function returns the closest shot to it within the given tolerance value.

This uses the `real_time_inputs` table and it does not give an indication of which project the shotpoint belongs to. It is assumed that a single project is being acquired at a given time.';

CREATE OR REPLACE FUNCTION public.sequence_shot_from_tstamp(
IN ts timestamptz,
OUT "sequence" numeric,
OUT "point" numeric,
OUT "delta" numeric)
AS $inner$
SELECT * FROM public.sequence_shot_from_tstamp(ts, 3);
$inner$ LANGUAGE SQL;

COMMENT ON FUNCTION public.sequence_shot_from_tstamp(timestamptz)
IS 'Get sequence and shotpoint from timestamp.

Overloaded form in which the tolerance value is implied and defaults to three seconds.';

FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;

END;
$$ LANGUAGE plpgsql;

CALL pg_temp.upgrade_database();

CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade_database ();

CALL show_notice('Updating db_schema version');


INSERT INTO public.info VALUES ('version', '{"db_schema": "0.2.1"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.2.1"}' WHERE public.info.key = 'version';


--
--NOTE Run `COMMIT;` now if all went well
--
etc/db/upgrades/upgrade12-v0.2.2-new-event-log-schema.sql (new file, 360 lines)
@@ -0,0 +1,360 @@
-- Add new event log schema.
--
-- New schema version: 0.2.2
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
-- REQUIRES POSTGRESQL VERSION 14 OR NEWER
-- (Because of CREATE OR REPLACE TRIGGER)
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This is a redesign of the event logging mechanism. The old mechanism
-- relied on a distinction between sequence events (i.e., those which can
-- be associated to a shotpoint within a sequence), timed events (those
-- which occur outside any acquisition sequence) and so-called virtual
-- events (deduced from the data). It was inflexible and inefficient,
-- as most of the time we needed to merge those event types back into
-- a single view.
--
-- The new mechanism:
-- - uses a single table
-- - accepts sequence event entries for shots or sequences which may not (yet)
--   exist. (https://gitlab.com/wgp/dougal/software/-/issues/170)
-- - keeps edit history (https://gitlab.com/wgp/dougal/software/-/issues/138)
-- - keeps track of when an entry was made or subsequently edited.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can take a while if run on a large database.
-- NOTE: It can be applied multiple times without ill effect, as long
-- as the new tables did not previously exist. If they did, they will
-- be emptied before migrating the data.
--
-- WARNING: Applying this upgrade migrates the old event data. It does
-- NOT yet drop the old tables, which is handled in a separate script,
-- leaving the actions here technically reversible without having to
-- restore from backup.
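-- A sketch of how edits behave once migrated (kept as a comment: it
-- only makes sense after this script has created the new objects, and
-- the id value 42 is hypothetical):
--
--   UPDATE event_log SET remarks = 'Amended remark' WHERE id = 42;
--   SELECT id, has_edits, modified_on FROM event_log WHERE id = 42;
--
-- The UPDATE closes the validity range of the current row in
-- event_log_full and inserts a new row carrying the same id, so every
-- earlier revision remains queryable from event_log_full.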
BEGIN;

CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
  RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $$
BEGIN

  RAISE NOTICE 'Updating schema %', schema_name;
  -- We need to set the search path because some of the trigger
  -- functions reference other tables in survey schemas assuming
  -- they are in the search path.
  EXECUTE format('SET search_path TO %I,public', schema_name);

  CREATE SEQUENCE IF NOT EXISTS event_log_uid_seq
    AS integer
    START WITH 1
    INCREMENT BY 1
    NO MINVALUE
    NO MAXVALUE
    CACHE 1;

  CREATE TABLE IF NOT EXISTS event_log_full (
    -- uid is a unique id for each entry in the table,
    -- including revisions of an existing entry.
    uid integer NOT NULL PRIMARY KEY DEFAULT nextval('event_log_uid_seq'),
    -- All revisions of an entry share the same id.
    -- If inserting a new entry, id = uid.
    id integer NOT NULL,
    -- No default tstamp because, for instance, a user could
    -- enter a sequence/point event referring to the future.
    -- An external process should scan those at regular intervals
    -- and populate the tstamp as needed.
    tstamp timestamptz NULL,
    sequence integer NULL,
    point integer NULL,
    remarks text NOT NULL DEFAULT '',
    labels text[] NOT NULL DEFAULT ARRAY[]::text[],
    -- TODO: Need a geometry column? Let us check performance as it is
    -- and, if needed, add a geometry column plus a spatial index.
    meta jsonb NOT NULL DEFAULT '{}'::jsonb,
    validity tstzrange NOT NULL CHECK (NOT isempty(validity)),
    -- We accept either:
    -- - Just a tstamp
    -- - Just a sequence / point pair
    -- - All three
    -- We don't accept:
    -- - A sequence without a point or vice-versa
    -- - Nothing being provided
    -- (see the commented examples just after this table)
    CHECK (
      (tstamp IS NOT NULL AND sequence IS NOT NULL AND point IS NOT NULL) OR
      (tstamp IS NOT NULL AND sequence IS NULL AND point IS NULL) OR
      (tstamp IS NULL AND sequence IS NOT NULL AND point IS NOT NULL)
    )
  );
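  -- Commented examples of the constraint above (hypothetical values,
  -- left inert so the upgrade script itself inserts no rows; id and
  -- validity are filled in by the BEFORE INSERT trigger defined below):
  --
  --   INSERT INTO event_log_full (tstamp) VALUES (now());                -- OK: time only
  --   INSERT INTO event_log_full (sequence, point) VALUES (1234, 1042);  -- OK: sequence / point only
  --   INSERT INTO event_log_full (sequence) VALUES (1234);               -- rejected by the CHECK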
  CREATE INDEX IF NOT EXISTS event_log_id ON event_log_full USING btree (id);

  CREATE OR REPLACE FUNCTION event_log_full_insert() RETURNS TRIGGER AS $inner$
  BEGIN
    NEW.id := COALESCE(NEW.id, NEW.uid);
    NEW.validity := tstzrange(current_timestamp, NULL);
    NEW.meta = COALESCE(NEW.meta, '{}'::jsonb);
    NEW.labels = COALESCE(NEW.labels, ARRAY[]::text[]);
    IF cardinality(NEW.labels) > 0 THEN
      -- Remove duplicates
      SELECT array_agg(DISTINCT elements)
      INTO NEW.labels
      FROM (SELECT unnest(NEW.labels) AS elements) AS labels;
    END IF;
    RETURN NEW;
  END;
  $inner$ LANGUAGE plpgsql;

  CREATE OR REPLACE TRIGGER event_log_full_insert_tg
  BEFORE INSERT ON event_log_full
  FOR EACH ROW EXECUTE FUNCTION event_log_full_insert();


  -- The public.notify() trigger to alert clients that something has changed
  CREATE OR REPLACE TRIGGER event_log_full_notify_tg
  AFTER INSERT OR DELETE OR UPDATE
  ON event_log_full FOR EACH ROW EXECUTE FUNCTION public.notify('event');

  --
  -- VIEW event_log
  --
  -- This is what is exposed to the user most of the time.
  -- It shows the current version of records in the event_log_full
  -- table.
  --
  -- The user applies edits to this view directly, which are
  -- processed via triggers.
  --

  CREATE OR REPLACE VIEW event_log AS
  SELECT
    id, tstamp, sequence, point, remarks, labels, meta,
    uid <> id AS has_edits,
    lower(validity) AS modified_on
  FROM event_log_full
  WHERE validity @> current_timestamp;

  CREATE OR REPLACE FUNCTION event_log_update() RETURNS TRIGGER AS $inner$
  BEGIN
    IF (TG_OP = 'INSERT') THEN

      -- Complete the tstamp if possible
      IF NEW.sequence IS NOT NULL AND NEW.point IS NOT NULL AND NEW.tstamp IS NULL THEN
        SELECT COALESCE(
          tstamp_from_sequence_shot(NEW.sequence, NEW.point),
          tstamp_interpolate(NEW.sequence, NEW.point)
        )
        INTO NEW.tstamp;
      END IF;

      -- Any id that is provided will be ignored. The generated
      -- id will match uid.
      INSERT INTO event_log_full
        (tstamp, sequence, point, remarks, labels, meta)
        VALUES (NEW.tstamp, NEW.sequence, NEW.point, NEW.remarks, NEW.labels, NEW.meta);

      RETURN NEW;

    ELSIF (TG_OP = 'UPDATE') THEN
      -- Set end of validity and create a new entry with id
      -- matching that of the old entry.

      -- NOTE: Do not allow updating an event that has meta.readonly = true
      IF EXISTS
        (SELECT *
         FROM event_log_full
         WHERE id = OLD.id AND (meta->>'readonly')::boolean IS TRUE)
      THEN
        RAISE check_violation USING MESSAGE = 'Cannot modify read-only entry';
        RETURN NULL;
      END IF;
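      -- For illustration (hypothetical): an event inserted with
      --   meta = '{"readonly": true}'
      -- rejects any UPDATE through this view with a check_violation,
      -- yet can still be deleted (see the DELETE branch below).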
      -- If the sequence / point has changed, and no new tstamp is provided, get one
      IF NEW.sequence <> OLD.sequence OR NEW.point <> OLD.point
        AND NEW.sequence IS NOT NULL AND NEW.point IS NOT NULL
        AND NEW.tstamp IS NULL OR NEW.tstamp = OLD.tstamp THEN
        SELECT COALESCE(
          tstamp_from_sequence_shot(NEW.sequence, NEW.point),
          tstamp_interpolate(NEW.sequence, NEW.point)
        )
        INTO NEW.tstamp;
      END IF;

      UPDATE event_log_full
      SET validity = tstzrange(lower(validity), current_timestamp)
      WHERE validity @> current_timestamp AND id = OLD.id;

      -- Any attempt to modify id will be ignored.
      INSERT INTO event_log_full
        (id, tstamp, sequence, point, remarks, labels, meta)
        VALUES (OLD.id, NEW.tstamp, NEW.sequence, NEW.point, NEW.remarks, NEW.labels, NEW.meta);

      RETURN NEW;

    ELSIF (TG_OP = 'DELETE') THEN
      -- Set end of validity.

      -- NOTE: We *do* allow deleting an event that has meta.readonly = true
      -- This could be of interest if for instance we wanted to keep the history
      -- of QC results for a point, provided that the QC routines write to
      -- event_log and not event_log_full
      UPDATE event_log_full
      SET validity = tstzrange(lower(validity), current_timestamp)
      WHERE validity @> current_timestamp AND id = OLD.id;

      RETURN NULL;
    END IF;
  END;
  $inner$ LANGUAGE plpgsql;


  CREATE OR REPLACE TRIGGER event_log_tg
  INSTEAD OF INSERT OR UPDATE OR DELETE ON event_log
  FOR EACH ROW EXECUTE FUNCTION event_log_update();


  -- NOTE
  -- This is where we migrate the actual data
  RAISE NOTICE 'Migrating schema %', schema_name;

  -- We start by deleting any data that the new tables might
  -- have had if they already existed.
  DELETE FROM event_log_full;

  -- We purposefully bypass event_log here, as the tables we're
  -- migrating from only contain a single version of each event.

  INSERT INTO event_log_full (tstamp, sequence, point, remarks, labels, meta)
  SELECT
    tstamp, sequence, point, remarks, labels,
    meta || json_build_object('geometry', geometry, 'readonly', virtual)::jsonb
  FROM events;

  UPDATE event_log_full SET meta = meta - 'geometry' WHERE meta->>'geometry' IS NULL;
  UPDATE event_log_full SET meta = meta - 'readonly' WHERE (meta->'readonly')::boolean IS false;


  -- This function used the superseded `events` view.
  -- We need to drop it because we're changing the return type.
  DROP FUNCTION IF EXISTS label_in_sequence (_sequence integer, _label text);

  CREATE OR REPLACE FUNCTION label_in_sequence (_sequence integer, _label text)
  RETURNS event_log
  LANGUAGE sql
  AS $inner$
    SELECT * FROM event_log WHERE sequence = _sequence AND _label = ANY(labels);
  $inner$;

  -- This procedure used the superseded `events` view (and some rather convoluted logic).
  CREATE OR REPLACE PROCEDURE handle_final_line_events (_seq integer, _label text, _column text)
  LANGUAGE plpgsql
  AS $inner$

  DECLARE
    _line final_lines_summary%ROWTYPE;
    _column_value integer;
    _tg_name text := 'final_line';
    _event event_log%ROWTYPE;
    event_id integer;
  BEGIN

    SELECT * INTO _line FROM final_lines_summary WHERE sequence = _seq;
    _event := label_in_sequence(_seq, _label);
    _column_value := row_to_json(_line)->>_column;

    --RAISE NOTICE '% is %', _label, _event;
    --RAISE NOTICE 'Line is %', _line;
    --RAISE NOTICE '% is % (%)', _column, _column_value, _label;

    IF _event IS NULL THEN
      --RAISE NOTICE 'We will populate the event log from the sequence data';

      INSERT INTO event_log (sequence, point, remarks, labels, meta)
      VALUES (
        -- The sequence
        _seq,
        -- The shotpoint
        _column_value,
        -- Remark. Something like "FSP <linename>"
        format('%s %s', _label, (SELECT meta->>'lineName' FROM final_lines WHERE sequence = _seq)),
        -- Label
        ARRAY[_label],
        -- Meta. Something like {"auto" : {"FSP" : "final_line"}}
        json_build_object('auto', json_build_object(_label, _tg_name))
      );

    ELSE
      --RAISE NOTICE 'We may populate the sequence meta from the event log';
      --RAISE NOTICE 'Unless the event log was populated by us previously';
      --RAISE NOTICE 'Populated by us previously? %', _event.meta->'auto'->>_label = _tg_name;

      IF _event.meta->'auto'->>_label IS DISTINCT FROM _tg_name THEN

        --RAISE NOTICE 'Adding % found in events log to final_line meta', _label;
        UPDATE final_lines
        SET meta = jsonb_set(meta, ARRAY[_label], to_jsonb(_event.point))
        WHERE sequence = _seq;

      END IF;

    END IF;
  END;
  $inner$;


END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_12 () AS $$
DECLARE
  row RECORD;
BEGIN
  FOR row IN
    SELECT schema_name FROM information_schema.schemata
    WHERE schema_name LIKE 'survey_%'
    ORDER BY schema_name
  LOOP
    CALL pg_temp.upgrade_survey_schema(row.schema_name);
  END LOOP;
END;
$$ LANGUAGE plpgsql;

CALL pg_temp.upgrade_12();

CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade_12 ();

CALL show_notice('Updating db_schema version');
-- This is technically still compatible with 0.2.0 as we are only adding
-- some more tables and views but not yet dropping the old ones, which we
-- will do separately so that these scripts do not get too big.
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.2.2"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.2.2"}' WHERE public.info.key = 'version';


CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
etc/db/upgrades/upgrade13-v0.3.0-migrate-events.sql (new file, 98 lines)
@@ -0,0 +1,98 @@
-- Migrate events to new schema
--
-- New schema version: 0.3.0
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This migrates the data from the old event log tables to the new schema.
-- It is a *very* good idea to review the data manually after the migration
-- as issues with the logs that had gone unnoticed may become evident now.
--
-- WARNING: If data exists in the new event tables, IT WILL BE TRUNCATED.
--
-- Other than that, this migration is fairly benign as it does not modify
-- the old data.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can take a while if run on a large database.
-- NOTE: It can be applied multiple times without ill effect.
-- NOTE: This will lock the new event tables while the transaction is active.
--
-- WARNING: This is a minor (not patch) version change, meaning that it requires
-- an upgrade and restart of the backend server.

BEGIN;

CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
  RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $$
BEGIN

  RAISE NOTICE 'Updating schema %', schema_name;
  -- We need to set the search path because some of the trigger
  -- functions reference other tables in survey schemas assuming
  -- they are in the search path.
  EXECUTE format('SET search_path TO %I,public', schema_name);

  TRUNCATE event_log_full;

  -- NOTE: meta->>'virtual' = TRUE means that the event was created algorithmically
  -- and should not be user editable.
  INSERT INTO event_log_full (tstamp, sequence, point, remarks, labels, meta)
  SELECT
    tstamp, sequence, point, remarks, labels,
    meta || json_build_object('geometry', geometry, 'readonly', virtual)::jsonb
  FROM events;

  -- We purposefully bypass event_log here
  UPDATE event_log_full SET meta = meta - 'geometry' WHERE meta->>'geometry' IS NULL;
  UPDATE event_log_full SET meta = meta - 'readonly' WHERE (meta->'readonly')::boolean IS false;

END;
$$ LANGUAGE plpgsql;


CREATE OR REPLACE PROCEDURE pg_temp.upgrade_database () AS $$
DECLARE
  row RECORD;
BEGIN
  FOR row IN
    SELECT schema_name FROM information_schema.schemata
    WHERE schema_name LIKE 'survey_%'
    ORDER BY schema_name
  LOOP
    CALL pg_temp.upgrade_survey_schema(row.schema_name);
  END LOOP;
END;
$$ LANGUAGE plpgsql;

CALL pg_temp.upgrade_database();

CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade_database ();

CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.0"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.0"}' WHERE public.info.key = 'version';


CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
etc/db/upgrades/upgrade14-v0.3.1-drop-old-event-tables.sql (new file, 99 lines)
@@ -0,0 +1,99 @@
-- Drop old event tables.
--
-- New schema version: 0.3.1
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This completes the migration from the old event logging mechanism by
-- DROPPING THE OLD DATABASE OBJECTS, MAKING THE MIGRATION IRREVERSIBLE,
-- other than by restoring from backup and manually transferring any new
-- data that may have been created in the meanwhile.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can take a while if run on a large database.
-- NOTE: It can be applied multiple times without ill effect.
-- NOTE: This will lock the database while the transaction is active.
--
-- WARNING: Applying this upgrade drops the old tables. Ensure that you
-- have migrated the data first.
--
-- NOTE: This is a patch version change so it does not require a
-- backend restart.

BEGIN;

CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
  RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $$
BEGIN

  RAISE NOTICE 'Updating schema %', schema_name;
  -- We need to set the search path because some of the trigger
  -- functions reference other tables in survey schemas assuming
  -- they are in the search path.
  EXECUTE format('SET search_path TO %I,public', schema_name);

  DROP FUNCTION IF EXISTS
    label_in_sequence(integer,text), reset_events_serials();

  DROP VIEW IF EXISTS
    events_midnight_shot, events_seq_timed, events_labels, "events";

  DROP TABLE IF EXISTS
    events_seq_labels, events_timed_labels, events_timed_seq, events_seq, events_timed;

  DROP SEQUENCE IF EXISTS
    events_seq_id_seq, events_timed_id_seq;

END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_database () AS $$
DECLARE
  row RECORD;
BEGIN
  FOR row IN
    SELECT schema_name FROM information_schema.schemata
    WHERE schema_name LIKE 'survey_%'
    ORDER BY schema_name
  LOOP
    CALL pg_temp.upgrade_survey_schema(row.schema_name);
  END LOOP;
END;
$$ LANGUAGE plpgsql;

CALL pg_temp.upgrade_database();

CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade_database ();

CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.1"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.1"}' WHERE public.info.key = 'version';


CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
etc/db/upgrades/upgrade15-v0.3.2-fix-project-summary.sql (new file, 136 lines)
@@ -0,0 +1,136 @@
-- Fix project_summary view.
--
-- New schema version: 0.3.2
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This fixes a problem with the project_summary view. In its common table
-- expression, the view definition tried to search public.projects based on
-- the search path value with the following expression:
--
--     (current_setting('search_path'::text) ~~ (p.schema || '%'::text))
--
-- That is of course bound to fail as soon as the schema goes above `survey_9`
-- because `survey_10 LIKE ('survey_1' || '%')` is TRUE.
--
-- The new mechanism relies on splitting the search_path instead, as the
-- check just after this comment block illustrates.
--
-- NOTE: The survey schema needs to be the leftmost element in search_path.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
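-- The failure mode and the fix as a self-contained, side-effect-free
-- check (illustrative schema names only):
SELECT
  'survey_10' LIKE 'survey_1' || '%'                  AS old_test, -- true: the false positive
  split_part('survey_10,public', ',', 1) = 'survey_1' AS new_test; -- false: correct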
BEGIN;

CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
  RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $$
BEGIN

  RAISE NOTICE 'Updating schema %', schema_name;
  -- We need to set the search path because some of the trigger
  -- functions reference other tables in survey schemas assuming
  -- they are in the search path.
  EXECUTE format('SET search_path TO %I,public', schema_name);

  CREATE OR REPLACE VIEW project_summary AS
  WITH fls AS (
    SELECT avg((final_lines_summary.duration / ((final_lines_summary.num_points - 1))::double precision)) AS shooting_rate,
      avg((final_lines_summary.length / date_part('epoch'::text, final_lines_summary.duration))) AS speed,
      sum(final_lines_summary.duration) AS prod_duration,
      sum(final_lines_summary.length) AS prod_distance
    FROM final_lines_summary
  ), project AS (
    SELECT p.pid,
      p.name,
      p.schema
    FROM public.projects p
    WHERE (split_part(current_setting('search_path'::text), ','::text, 1) = p.schema)
  )
  SELECT project.pid,
    project.name,
    project.schema,
    ( SELECT count(*) AS count
      FROM preplot_lines
      WHERE (preplot_lines.class = 'V'::bpchar)) AS lines,
    ps.total,
    ps.virgin,
    ps.prime,
    ps.other,
    ps.ntba,
    ps.remaining,
    ( SELECT to_json(fs.*) AS to_json
      FROM final_shots fs
      ORDER BY fs.tstamp
      LIMIT 1) AS fsp,
    ( SELECT to_json(fs.*) AS to_json
      FROM final_shots fs
      ORDER BY fs.tstamp DESC
      LIMIT 1) AS lsp,
    ( SELECT count(*) AS count
      FROM raw_lines rl) AS seq_raw,
    ( SELECT count(*) AS count
      FROM final_lines rl) AS seq_final,
    fls.prod_duration,
    fls.prod_distance,
    fls.speed AS shooting_rate
  FROM preplot_summary ps,
    fls,
    project;


  ALTER TABLE project_summary OWNER TO postgres;

END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_15 () AS $$
DECLARE
  row RECORD;
BEGIN
  FOR row IN
    SELECT schema_name FROM information_schema.schemata
    WHERE schema_name LIKE 'survey_%'
    ORDER BY schema_name
  LOOP
    CALL pg_temp.upgrade_survey_schema(row.schema_name);
  END LOOP;
END;
$$ LANGUAGE plpgsql;

CALL pg_temp.upgrade_15();

CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade_15 ();

CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.2"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.2"}' WHERE public.info.key = 'version';


CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
etc/db/upgrades/upgrade16-v0.3.3-fix-event-log-edit.sql (new file, 169 lines)
@@ -0,0 +1,169 @@
-- Fix not being able to edit a time-based event.
--
-- New schema version: 0.3.3
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- The event_log_update() function that gets called when trying to update
-- the event_log view will not work if the caller does not provide a timestamp
-- or sequence + point in the list of fields to be updated. See:
-- https://gitlab.com/wgp/dougal/software/-/issues/198
--
-- This fixes the problem by liberally using COALESCE() to merge the OLD
-- and NEW records.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
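-- Sketch of an edit that used to fail (kept as a comment so the
-- upgrade itself modifies no data; the id value 42 is hypothetical):
--
--   UPDATE event_log SET remarks = 'typo fixed' WHERE id = 42;
--
-- When such an edit reached the trigger with a NULL tstamp on a
-- time-based event, the re-inserted revision violated the CHECK
-- constraint on event_log_full; the COALESCE(NEW.x, OLD.x) calls
-- below carry the old values over instead.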
BEGIN;

CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
  RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $$
BEGIN

  RAISE NOTICE 'Updating schema %', schema_name;
  -- We need to set the search path because some of the trigger
  -- functions reference other tables in survey schemas assuming
  -- they are in the search path.
  EXECUTE format('SET search_path TO %I,public', schema_name);

  CREATE OR REPLACE FUNCTION event_log_update() RETURNS trigger
  LANGUAGE plpgsql
  AS $inner$
  BEGIN
    IF (TG_OP = 'INSERT') THEN

      -- Complete the tstamp if possible
      IF NEW.sequence IS NOT NULL AND NEW.point IS NOT NULL AND NEW.tstamp IS NULL THEN
        SELECT COALESCE(
          tstamp_from_sequence_shot(NEW.sequence, NEW.point),
          tstamp_interpolate(NEW.sequence, NEW.point)
        )
        INTO NEW.tstamp;
      END IF;

      -- Any id that is provided will be ignored. The generated
      -- id will match uid.
      INSERT INTO event_log_full
        (tstamp, sequence, point, remarks, labels, meta)
        VALUES (NEW.tstamp, NEW.sequence, NEW.point, NEW.remarks, NEW.labels, NEW.meta);

      RETURN NEW;

    ELSIF (TG_OP = 'UPDATE') THEN
      -- Set end of validity and create a new entry with id
      -- matching that of the old entry.

      -- NOTE: Do not allow updating an event that has meta.readonly = true
      IF EXISTS
        (SELECT *
         FROM event_log_full
         WHERE id = OLD.id AND (meta->>'readonly')::boolean IS TRUE)
      THEN
        RAISE check_violation USING MESSAGE = 'Cannot modify read-only entry';
        RETURN NULL;
      END IF;

      -- If the sequence / point has changed, and no new tstamp is provided, get one
      IF NEW.sequence <> OLD.sequence OR NEW.point <> OLD.point
        AND NEW.sequence IS NOT NULL AND NEW.point IS NOT NULL
        AND NEW.tstamp IS NULL OR NEW.tstamp = OLD.tstamp THEN
        SELECT COALESCE(
          tstamp_from_sequence_shot(NEW.sequence, NEW.point),
          tstamp_interpolate(NEW.sequence, NEW.point)
        )
        INTO NEW.tstamp;
      END IF;

      UPDATE event_log_full
      SET validity = tstzrange(lower(validity), current_timestamp)
      WHERE validity @> current_timestamp AND id = OLD.id;

      -- Any attempt to modify id will be ignored.
      INSERT INTO event_log_full
        (id, tstamp, sequence, point, remarks, labels, meta)
        VALUES (
          OLD.id,
          COALESCE(NEW.tstamp, OLD.tstamp),
          COALESCE(NEW.sequence, OLD.sequence),
          COALESCE(NEW.point, OLD.point),
          COALESCE(NEW.remarks, OLD.remarks),
          COALESCE(NEW.labels, OLD.labels),
          COALESCE(NEW.meta, OLD.meta)
        );

      RETURN NEW;

    ELSIF (TG_OP = 'DELETE') THEN
      -- Set end of validity.

      -- NOTE: We *do* allow deleting an event that has meta.readonly = true
      -- This could be of interest if for instance we wanted to keep the history
      -- of QC results for a point, provided that the QC routines write to
      -- event_log and not event_log_full
      UPDATE event_log_full
      SET validity = tstzrange(lower(validity), current_timestamp)
      WHERE validity @> current_timestamp AND id = OLD.id;

      RETURN NULL;
    END IF;
  END;
  $inner$;

  CREATE OR REPLACE TRIGGER event_log_tg INSTEAD OF INSERT OR DELETE OR UPDATE ON event_log FOR EACH ROW EXECUTE FUNCTION event_log_update();


END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_16 () AS $$
DECLARE
  row RECORD;
BEGIN
  FOR row IN
    SELECT schema_name FROM information_schema.schemata
    WHERE schema_name LIKE 'survey_%'
    ORDER BY schema_name
  LOOP
    CALL pg_temp.upgrade_survey_schema(row.schema_name);
  END LOOP;
END;
$$ LANGUAGE plpgsql;

CALL pg_temp.upgrade_16();

CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade_16 ();

CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.3"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.3"}' WHERE public.info.key = 'version';


CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
etc/db/upgrades/upgrade17-v0.3.4-geometry-functions.sql (new file, 163 lines)
@@ -0,0 +1,163 @@
-- Add functions to populate missing event data.
--
-- New schema version: 0.3.4
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This creates a new procedure augment_event_data() which tries to
-- populate missing event_log data, namely timestamps and geometries.
--
-- To do this it also adds a function public.geometry_from_tstamp()
-- which, given a timestamp, tries to fetch a geometry from real_time_inputs.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
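-- Intended usage once the upgrade is in place (kept as a comment; the
-- timestamp and the 3-second tolerance are hypothetical):
--
--   CALL augment_event_data();
--   SELECT * FROM public.geometry_from_tstamp('2024-05-01T12:00:00Z'::timestamptz, 3);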
BEGIN;

CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
  RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $$
BEGIN

  RAISE NOTICE 'Updating schema %', schema_name;
  -- We need to set the search path because some of the trigger
  -- functions reference other tables in survey schemas assuming
  -- they are in the search path.
  EXECUTE format('SET search_path TO %I,public', schema_name);

  CREATE OR REPLACE PROCEDURE augment_event_data ()
  LANGUAGE sql
  AS $inner$
    -- Populate the timestamp of sequence / point events
    UPDATE event_log_full
    SET tstamp = tstamp_from_sequence_shot(sequence, point)
    WHERE
      tstamp IS NULL AND sequence IS NOT NULL AND point IS NOT NULL;

    -- Populate the geometry of sequence / point events for which
    -- there is raw_shots data.
    UPDATE event_log_full
    SET meta = meta ||
      jsonb_build_object(
        'geometry',
        (
          SELECT st_transform(geometry, 4326)::jsonb
          FROM raw_shots rs
          WHERE rs.sequence = event_log_full.sequence AND rs.point = event_log_full.point
        )
      )
    WHERE
      sequence IS NOT NULL AND point IS NOT NULL AND
      NOT meta ? 'geometry';

    -- Populate the geometry of time-based events
    UPDATE event_log_full e
    SET
      meta = meta || jsonb_build_object('geometry',
        (SELECT st_transform(g.geometry, 4326)::jsonb
         FROM geometry_from_tstamp(e.tstamp, 3) g))
    WHERE
      tstamp IS NOT NULL AND
      sequence IS NULL AND point IS NULL AND
      NOT meta ? 'geometry';

    -- Get rid of null geometries
    UPDATE event_log_full
    SET
      meta = meta - 'geometry'
    WHERE
      jsonb_typeof(meta->'geometry') = 'null';

    -- Simplify the GeoJSON when the CRS is EPSG:4326
    UPDATE event_log_full
    SET
      meta = meta #- '{geometry, crs}'
    WHERE
      meta->'geometry'->'crs'->'properties'->>'name' = 'EPSG:4326';

  $inner$;

  COMMENT ON PROCEDURE augment_event_data()
  IS 'Populate missing timestamps and geometries in event_log_full';

END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_17 () AS $$
DECLARE
  row RECORD;
BEGIN

  CALL show_notice('Adding index to real_time_inputs.meta->tstamp');
  CREATE INDEX IF NOT EXISTS meta_tstamp_idx
    ON public.real_time_inputs
    USING btree ((meta->>'tstamp') DESC);

  CALL show_notice('Creating function geometry_from_tstamp');
  CREATE OR REPLACE FUNCTION public.geometry_from_tstamp(
    IN ts timestamptz,
    IN tolerance numeric,
    OUT "geometry" geometry,
    OUT "delta" numeric)
  AS $inner$
    SELECT
      geometry,
      extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts ) AS delta
    FROM real_time_inputs
    WHERE
      geometry IS NOT NULL AND
      abs(extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts )) < tolerance
    ORDER BY abs(extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts ))
    LIMIT 1;
  $inner$ LANGUAGE SQL;

  COMMENT ON FUNCTION public.geometry_from_tstamp(timestamptz, numeric)
  IS 'Get geometry from timestamp';

  FOR row IN
    SELECT schema_name FROM information_schema.schemata
    WHERE schema_name LIKE 'survey_%'
    ORDER BY schema_name
  LOOP
    CALL pg_temp.upgrade_survey_schema(row.schema_name);
  END LOOP;
END;
$$ LANGUAGE plpgsql;

CALL pg_temp.upgrade_17();

CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade_17 ();

CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.4"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.4"}' WHERE public.info.key = 'version';


CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
etc/db/upgrades/upgrade18-v0.3.5-label_in_sequence-function.sql (new file, 158 lines)
@@ -0,0 +1,158 @@
-- Restore the missing label_in_sequence() function.
--
-- New schema version: 0.3.5
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- The function label_in_sequence(integer, text) was missing for the
-- production schemas. This patch (re-)defines the function as well
-- as the other functions that depend on it (otherwise the new
-- definition does not get picked up).
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
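-- Quick check once applied (kept as a comment; sequence 1234 and the
-- label 'FSP' are hypothetical):
--
--   SELECT * FROM label_in_sequence(1234, 'FSP');
--
-- Returns the current event carrying that label in the sequence, as an
-- event_log row, or NULL when there is none.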
BEGIN;

CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
  RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $$
BEGIN

  RAISE NOTICE 'Updating schema %', schema_name;
  -- We need to set the search path because some of the trigger
  -- functions reference other tables in survey schemas assuming
  -- they are in the search path.
  EXECUTE format('SET search_path TO %I,public', schema_name);


  CREATE OR REPLACE FUNCTION label_in_sequence(_sequence integer, _label text) RETURNS event_log
  LANGUAGE sql
  AS $inner$
    SELECT * FROM event_log WHERE sequence = _sequence AND _label = ANY(labels);
  $inner$;

  -- We need to redefine the functions / procedures that call label_in_sequence

  CREATE OR REPLACE PROCEDURE handle_final_line_events(IN _seq integer, IN _label text, IN _column text)
  LANGUAGE plpgsql
  AS $inner$

  DECLARE
    _line final_lines_summary%ROWTYPE;
    _column_value integer;
    _tg_name text := 'final_line';
    _event event_log%ROWTYPE;
    event_id integer;
  BEGIN

    SELECT * INTO _line FROM final_lines_summary WHERE sequence = _seq;
    _event := label_in_sequence(_seq, _label);
    _column_value := row_to_json(_line)->>_column;

    --RAISE NOTICE '% is %', _label, _event;
    --RAISE NOTICE 'Line is %', _line;
    --RAISE NOTICE '% is % (%)', _column, _column_value, _label;

    IF _event IS NULL THEN
      --RAISE NOTICE 'We will populate the event log from the sequence data';

      INSERT INTO event_log (sequence, point, remarks, labels, meta)
      VALUES (
        -- The sequence
        _seq,
        -- The shotpoint
        _column_value,
        -- Remark. Something like "FSP <linename>"
        format('%s %s', _label, (SELECT meta->>'lineName' FROM final_lines WHERE sequence = _seq)),
        -- Label
        ARRAY[_label],
        -- Meta. Something like {"auto" : {"FSP" : "final_line"}}
        json_build_object('auto', json_build_object(_label, _tg_name))
      );

    ELSE
      --RAISE NOTICE 'We may populate the sequence meta from the event log';
      --RAISE NOTICE 'Unless the event log was populated by us previously';
      --RAISE NOTICE 'Populated by us previously? %', _event.meta->'auto'->>_label = _tg_name;

      IF _event.meta->'auto'->>_label IS DISTINCT FROM _tg_name THEN

        --RAISE NOTICE 'Adding % found in events log to final_line meta', _label;
        UPDATE final_lines
        SET meta = jsonb_set(meta, ARRAY[_label], to_jsonb(_event.point))
        WHERE sequence = _seq;

      END IF;

    END IF;
  END;
  $inner$;

  CREATE OR REPLACE PROCEDURE final_line_post_import(IN _seq integer)
  LANGUAGE plpgsql
  AS $inner$
  BEGIN

    CALL handle_final_line_events(_seq, 'FSP', 'fsp');
    CALL handle_final_line_events(_seq, 'FGSP', 'fsp');
    CALL handle_final_line_events(_seq, 'LGSP', 'lsp');
    CALL handle_final_line_events(_seq, 'LSP', 'lsp');

  END;
  $inner$;


END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_18 () AS $$
DECLARE
  row RECORD;
BEGIN
  FOR row IN
    SELECT schema_name FROM information_schema.schemata
    WHERE schema_name LIKE 'survey_%'
    ORDER BY schema_name
  LOOP
    CALL pg_temp.upgrade_survey_schema(row.schema_name);
  END LOOP;
END;
$$ LANGUAGE plpgsql;

CALL pg_temp.upgrade_18();

CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade_18 ();

CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.5"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.5"}' WHERE public.info.key = 'version';


CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
etc/db/upgrades/upgrade19-v0.3.6-optimise-geometry-functions.sql (new file, 162 lines)
@@ -0,0 +1,162 @@
-- Optimise the geometry lookup functions.
--
-- New schema version: 0.3.6
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This optimises geometry_from_tstamp() by many orders of magnitude
-- (issue #241). The redefinition of geometry_from_tstamp() necessitates
-- redefining dependent functions.
--
-- We also drop the index on real_time_inputs.meta->'tstamp' as it is no
-- longer used.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
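-- The gist of the optimisation, for reference. The old predicate
--
--   abs(extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts)) < tolerance
--
-- had to be evaluated row by row, whereas the new one
--
--   tstamp BETWEEN (ts - tolerance * interval '1 second')
--              AND (ts + tolerance * interval '1 second')
--
-- is a plain range condition on the tstamp column, which a btree index
-- on real_time_inputs.tstamp (assuming one exists) can serve directly.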
BEGIN;

CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
  RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $$
BEGIN

  RAISE NOTICE 'Updating schema %', schema_name;
  -- We need to set the search path because some of the trigger
  -- functions reference other tables in survey schemas assuming
  -- they are in the search path.
  EXECUTE format('SET search_path TO %I,public', schema_name);

  CREATE OR REPLACE PROCEDURE augment_event_data ()
  LANGUAGE sql
  AS $inner$
    -- Populate the timestamp of sequence / point events
    UPDATE event_log_full
    SET tstamp = tstamp_from_sequence_shot(sequence, point)
    WHERE
      tstamp IS NULL AND sequence IS NOT NULL AND point IS NOT NULL;

    -- Populate the geometry of sequence / point events for which
    -- there is raw_shots data.
    UPDATE event_log_full
    SET meta = meta ||
      jsonb_build_object(
        'geometry',
        (
          SELECT st_transform(geometry, 4326)::jsonb
          FROM raw_shots rs
          WHERE rs.sequence = event_log_full.sequence AND rs.point = event_log_full.point
        )
      )
    WHERE
      sequence IS NOT NULL AND point IS NOT NULL AND
      NOT meta ? 'geometry';

    -- Populate the geometry of time-based events
    UPDATE event_log_full e
    SET
      meta = meta || jsonb_build_object('geometry',
        (SELECT st_transform(g.geometry, 4326)::jsonb
         FROM geometry_from_tstamp(e.tstamp, 3) g))
    WHERE
      tstamp IS NOT NULL AND
      sequence IS NULL AND point IS NULL AND
      NOT meta ? 'geometry';

    -- Get rid of null geometries
    UPDATE event_log_full
    SET
      meta = meta - 'geometry'
    WHERE
      jsonb_typeof(meta->'geometry') = 'null';

    -- Simplify the GeoJSON when the CRS is EPSG:4326
    UPDATE event_log_full
    SET
      meta = meta #- '{geometry, crs}'
    WHERE
      meta->'geometry'->'crs'->'properties'->>'name' = 'EPSG:4326';

  $inner$;

  COMMENT ON PROCEDURE augment_event_data()
  IS 'Populate missing timestamps and geometries in event_log_full';

END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
  row RECORD;
BEGIN

  CALL show_notice('Dropping index from real_time_inputs.meta->tstamp');
  DROP INDEX IF EXISTS meta_tstamp_idx;

  CALL show_notice('Creating function geometry_from_tstamp');
  CREATE OR REPLACE FUNCTION public.geometry_from_tstamp(
    IN ts timestamptz,
    IN tolerance numeric,
    OUT "geometry" geometry,
    OUT "delta" numeric)
  AS $inner$
    SELECT
      geometry,
      extract('epoch' FROM tstamp - ts ) AS delta
    FROM real_time_inputs
    WHERE
      geometry IS NOT NULL AND
      tstamp BETWEEN (ts - tolerance * interval '1 second') AND (ts + tolerance * interval '1 second')
    ORDER BY abs(extract('epoch' FROM tstamp - ts ))
    LIMIT 1;
  $inner$ LANGUAGE SQL;

  COMMENT ON FUNCTION public.geometry_from_tstamp(timestamptz, numeric)
  IS 'Get geometry from timestamp';

  FOR row IN
    SELECT schema_name FROM information_schema.schemata
    WHERE schema_name LIKE 'survey_%'
    ORDER BY schema_name
  LOOP
    CALL pg_temp.upgrade_survey_schema(row.schema_name);
  END LOOP;
END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();

CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.6"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.6"}' WHERE public.info.key = 'version';


CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
@@ -0,0 +1,254 @@
-- Update adjust_planner() for the new event log schema.
--
-- New schema version: 0.3.7
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This updates the adjust_planner() procedure to take into account the
-- new events schema (the `events` view has been replaced by `event_log`).
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
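-- For reference, a manual invocation looks as below (kept as a comment
-- so this upgrade does not itself touch the planner data):
--
--   CALL adjust_planner();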
BEGIN;
|
||||
|
||||
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
|
||||
BEGIN
|
||||
RAISE NOTICE '%', notice;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
|
||||
BEGIN
|
||||
|
||||
RAISE NOTICE 'Updating schema %', schema_name;
|
||||
-- We need to set the search path because some of the trigger
|
||||
-- functions reference other tables in survey schemas assuming
|
||||
-- they are in the search path.
|
||||
EXECUTE format('SET search_path TO %I,public', schema_name);
|
||||
|
||||
CALL pg_temp.show_notice('Replacing adjust_planner() procedure');
|
||||
CREATE OR REPLACE PROCEDURE adjust_planner()
|
||||
LANGUAGE plpgsql
|
||||
AS $$
|
||||
DECLARE
|
||||
_planner_config jsonb;
|
||||
_planned_line planned_lines%ROWTYPE;
|
||||
_lag interval;
|
||||
_last_sequence sequences_summary%ROWTYPE;
|
||||
_deltatime interval;
|
||||
_shotinterval interval;
|
||||
_tstamp timestamptz;
|
||||
_incr integer;
|
||||
BEGIN
|
||||
|
||||
SET CONSTRAINTS planned_lines_pkey DEFERRED;
|
||||
|
||||
SELECT data->'planner'
|
||||
INTO _planner_config
|
||||
FROM file_data
|
||||
WHERE data ? 'planner';
|
||||
|
||||
SELECT *
|
||||
INTO _last_sequence
|
||||
FROM sequences_summary
|
||||
ORDER BY sequence DESC
|
||||
LIMIT 1;
|
||||
|
||||
SELECT *
|
||||
INTO _planned_line
|
||||
FROM planned_lines
|
||||
WHERE sequence = _last_sequence.sequence AND line = _last_sequence.line;
|
||||
|
||||
SELECT
|
||||
COALESCE(
|
||||
((lead(ts0) OVER (ORDER BY sequence)) - ts1),
|
||||
make_interval(mins => (_planner_config->>'defaultLineChangeDuration')::integer)
|
||||
)
|
||||
INTO _lag
|
||||
FROM planned_lines
|
||||
WHERE sequence = _last_sequence.sequence AND line = _last_sequence.line;
|
||||
|
||||
_incr = sign(_last_sequence.lsp - _last_sequence.fsp);
|
||||
|
||||
RAISE NOTICE '_planner_config: %', _planner_config;
|
||||
RAISE NOTICE '_last_sequence: %', _last_sequence;
|
||||
RAISE NOTICE '_planned_line: %', _planned_line;
|
||||
RAISE NOTICE '_incr: %', _incr;
|
||||
|
||||
-- Does the latest sequence match a planned sequence?
|
||||
IF _planned_line IS NULL THEN -- No it doesn't
|
||||
RAISE NOTICE 'Latest sequence shot does not match a planned sequence';
|
||||
SELECT * INTO _planned_line FROM planned_lines ORDER BY sequence ASC LIMIT 1;
|
||||
RAISE NOTICE '_planned_line: %', _planned_line;
|
||||
|
||||
IF _planned_line.sequence <= _last_sequence.sequence THEN
|
||||
RAISE NOTICE 'Renumbering the planned sequences starting from %', _planned_line.sequence + 1;
|
||||
-- Renumber the planned sequences starting from last shot sequence number + 1
|
||||
UPDATE planned_lines
|
||||
SET sequence = sequence + _last_sequence.sequence - _planned_line.sequence + 1;
|
||||
END IF;
|
||||
|
||||
-- The correction to make to the first planned line's ts0 will be based on either the last
|
||||
-- sequence's EOL + default line change time or the current time, whichever is later.
|
||||
_deltatime := GREATEST(COALESCE(_last_sequence.ts1_final, _last_sequence.ts1) + make_interval(mins => (_planner_config->>'defaultLineChangeDuration')::integer), current_timestamp) - _planned_line.ts0;
|
||||
|
||||
-- Is the first of the planned lines start time in the past? (±5 mins)
|
||||
IF _planned_line.ts0 < (current_timestamp - make_interval(mins => 5)) THEN
|
||||
RAISE NOTICE 'First planned line is in the past. Adjusting times by %', _deltatime;
|
||||
-- Adjust the start / end time of the planned lines by assuming that we are at
|
||||
-- `defaultLineChangeDuration` minutes away from SOL of the first planned line.
|
||||
UPDATE planned_lines
|
||||
SET
|
||||
ts0 = ts0 + _deltatime,
|
||||
ts1 = ts1 + _deltatime;
|
||||
END IF;
|
||||
|
||||
ELSE -- Yes it does
|
||||
RAISE NOTICE 'Latest sequence does match a planned sequence: %, %', _planned_line.sequence, _planned_line.line;
|
||||
|
||||
-- Is it online?
|
||||
IF EXISTS(SELECT 1 FROM raw_lines_files WHERE sequence = _last_sequence.sequence AND hash = '*online*') THEN
|
||||
-- Yes it is
|
||||
RAISE NOTICE 'Sequence % is online', _last_sequence.sequence;
|
||||
|
||||
-- Let us get the SOL from the events log if we can
|
||||
RAISE NOTICE 'Trying to set fsp, ts0 from events log FSP, FGSP';
|
||||
WITH e AS (
|
||||
SELECT * FROM event_log
|
||||
WHERE
|
||||
sequence = _last_sequence.sequence
|
||||
AND ('FSP' = ANY(labels) OR 'FGSP' = ANY(labels))
|
||||
ORDER BY tstamp LIMIT 1
|
||||
)
|
||||
UPDATE planned_lines
|
||||
SET
|
||||
fsp = COALESCE(e.point, fsp),
|
||||
ts0 = COALESCE(e.tstamp, ts0)
|
||||
FROM e
|
||||
WHERE planned_lines.sequence = _last_sequence.sequence;
|
||||
|
||||
-- Shot interval
|
||||
_shotinterval := (_last_sequence.ts1 - _last_sequence.ts0) / abs(_last_sequence.lsp - _last_sequence.fsp);
|
||||
|
||||
RAISE NOTICE 'Estimating EOL from current shot interval: %', _shotinterval;
|
||||
|
||||
SELECT (abs(lsp-fsp) * _shotinterval + ts0) - ts1
|
||||
INTO _deltatime
|
||||
FROM planned_lines
|
||||
WHERE sequence = _last_sequence.sequence;

---- Set ts1 for the current sequence
--UPDATE planned_lines
--SET
--ts1 = (abs(lsp-fsp) * _shotinterval) + ts0
--WHERE sequence = _last_sequence.sequence;

RAISE NOTICE 'Adjustment is %', _deltatime;

IF abs(EXTRACT(EPOCH FROM _deltatime)) < 8 THEN
RAISE NOTICE 'Adjustment too small (< 8 s), so not applying it';
RETURN;
END IF;

-- Adjust ts1 for the current sequence
UPDATE planned_lines
SET ts1 = ts1 + _deltatime
WHERE sequence = _last_sequence.sequence;

-- Now shift all sequences after
UPDATE planned_lines
SET ts0 = ts0 + _deltatime, ts1 = ts1 + _deltatime
WHERE sequence > _last_sequence.sequence;

RAISE NOTICE 'Deleting planned sequences before %', _planned_line.sequence;
-- Remove all previous planner entries.
DELETE
FROM planned_lines
WHERE sequence < _last_sequence.sequence;

ELSE
-- No it isn't
RAISE NOTICE 'Sequence % is offline', _last_sequence.sequence;

-- We were supposed to finish at _planned_line.ts1 but we finished at:
_tstamp := GREATEST(COALESCE(_last_sequence.ts1_final, _last_sequence.ts1), current_timestamp);
-- WARNING Next line is for testing only
--_tstamp := COALESCE(_last_sequence.ts1_final, _last_sequence.ts1);
-- So we need to adjust timestamps by:
_deltatime := _tstamp - _planned_line.ts1;

RAISE NOTICE 'Planned end: %, actual end: % (%, %)', _planned_line.ts1, _tstamp, _planned_line.sequence, _last_sequence.sequence;
RAISE NOTICE 'Shifting times by % for sequences > %', _deltatime, _planned_line.sequence;
-- NOTE: This won't work if sequences are not, err… sequential.
-- NOTE: This has been known to happen in 2020.
UPDATE planned_lines
SET
ts0 = ts0 + _deltatime,
ts1 = ts1 + _deltatime
WHERE sequence > _planned_line.sequence;

RAISE NOTICE 'Deleting planned sequences up to %', _planned_line.sequence;
-- Remove all previous planner entries.
DELETE
FROM planned_lines
WHERE sequence <= _last_sequence.sequence;

END IF;

END IF;
END;
$$;


END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
BEGIN

FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;

END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();

CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.7"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.7"}' WHERE public.info.key = 'version';


CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--

etc/db/upgrades/upgrade21-v0.3.8-add-event-data-functions.sql (new file, 267 lines)
@@ -0,0 +1,267 @@
-- Fix not being able to edit a time-based event.
--
-- New schema version: 0.3.8
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This adds event_position() and event_meta() functions which are used
-- to retrieve position or metadata, respectively, given either a timestamp
-- or a sequence / point pair. Intended to be used in the context of #229.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
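-- A minimal usage sketch, once applied (the sequence / point / timestamp
-- values below are hypothetical, purely for illustration):
--
--   SELECT event_position(NULL, 11, 2600);            -- position by sequence / point
--   SELECT event_position('2024-01-01 00:00:00+00');  -- position by timestamp
--   SELECT event_meta(11, 2600);                      -- real-time metadata for a shot
--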

BEGIN;

CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN

RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);

--
-- event_position(): Fetch event position
--

CREATE OR REPLACE FUNCTION event_position (
tstamp timestamptz, sequence integer, point integer, tolerance numeric
)
RETURNS geometry
AS $$
DECLARE
position geometry;
BEGIN

-- Try and get position by sequence / point first
IF sequence IS NOT NULL AND point IS NOT NULL THEN
-- Try and get the position from final_shots or raw_shots
SELECT COALESCE(f.geometry, r.geometry) geometry
INTO position
FROM raw_shots r LEFT JOIN final_shots f USING (sequence, point)
WHERE r.sequence = event_position.sequence AND r.point = event_position.point;

IF position IS NOT NULL THEN
RETURN position;
ELSIF tstamp IS NULL THEN
-- Get the timestamp for the sequence / point, if we can.
-- It will be used later in the function as we fall back
-- to timestamp-based search.
-- We also adjust the tolerance as we're now dealing with
-- an exact timestamp.
SELECT COALESCE(f.tstamp, r.tstamp) tstamp, 0.002 tolerance
INTO tstamp, tolerance
FROM raw_shots r LEFT JOIN final_shots f USING (sequence, point)
WHERE r.sequence = event_position.sequence AND r.point = event_position.point;
END IF;
END IF;

-- If we got here, we'd better have a timestamp.
-- First attempt: get a position from final_shots, raw_shots. This may
-- be redundant if we got here holding a sequence / point that had no
-- position, but never mind.
SELECT COALESCE(f.geometry, r.geometry) geometry
INTO position
FROM raw_shots r LEFT JOIN final_shots f USING (sequence, point)
WHERE r.tstamp = event_position.tstamp OR f.tstamp = event_position.tstamp
LIMIT 1; -- Just to be sure

IF position IS NULL THEN
-- Ok, so everything else so far has failed, let's try and get this
-- from real-time data. We skip the search via sequence / point and
-- go directly for timestamp.
SELECT geometry
INTO position
FROM geometry_from_tstamp(tstamp, tolerance);
END IF;

RETURN position;

END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION event_position (timestamptz, integer, integer, numeric) IS
'Return the position associated with a sequence / point in the current project or
with a given timestamp. The timestamp is first searched for in the shot tables
of the current prospect or, if not found, in the real-time data.

Returns a geometry.';

CREATE OR REPLACE FUNCTION event_position (
tstamp timestamptz, sequence integer, point integer
)
RETURNS geometry
AS $$
BEGIN
RETURN event_position(tstamp, sequence, point, 3);
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION event_position (timestamptz, integer, integer) IS
'Overload of event_position with a default tolerance of three seconds.';


CREATE OR REPLACE FUNCTION event_position (
tstamp timestamptz
)
RETURNS geometry
AS $$
BEGIN
RETURN event_position(tstamp, NULL, NULL);
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION event_position (timestamptz) IS
'Overload of event_position (timestamptz, integer, integer) for use when searching by timestamp.';

CREATE OR REPLACE FUNCTION event_position (
sequence integer, point integer
)
RETURNS geometry
AS $$
BEGIN
RETURN event_position(NULL, sequence, point);
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION event_position (integer, integer) IS
'Overload of event_position (timestamptz, integer, integer) for use when searching by sequence / point.';


END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
BEGIN

--
-- event_meta(): Fetch event metadata
--

CREATE OR REPLACE FUNCTION event_meta (
tstamp timestamptz, sequence integer, point integer
)
RETURNS jsonb
AS $$
DECLARE
result jsonb;
-- Tolerance is hard-coded, at least until a need to expose it arises.
tolerance numeric;
BEGIN
tolerance := 3; -- seconds

-- We search by timestamp if we can, as that's a lot quicker
IF tstamp IS NOT NULL THEN

SELECT meta
INTO result
FROM real_time_inputs rti
WHERE
rti.tstamp BETWEEN (event_meta.tstamp - tolerance * interval '1 second') AND (event_meta.tstamp + tolerance * interval '1 second')
ORDER BY abs(extract('epoch' FROM rti.tstamp - event_meta.tstamp))
LIMIT 1;

ELSE

SELECT meta
INTO result
FROM real_time_inputs rti
WHERE
(meta->>'_sequence')::integer = event_meta.sequence AND
(meta->>'_point')::integer = event_meta.point
ORDER BY rti.tstamp DESC
LIMIT 1;

END IF;

RETURN result;

END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION event_meta (timestamptz, integer, integer) IS
'Return the real-time event metadata associated with a sequence / point in the current project or
with a given timestamp. The timestamp is first searched for in the shot tables
of the current prospect or, if not found, in the real-time data.

Returns a JSONB object.';


CREATE OR REPLACE FUNCTION event_meta (
tstamp timestamptz
)
RETURNS jsonb
AS $$
BEGIN
RETURN event_meta(tstamp, NULL, NULL);
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION event_meta (timestamptz) IS
'Overload of event_meta (timestamptz, integer, integer) for use when searching by timestamp.';

CREATE OR REPLACE FUNCTION event_meta (
sequence integer, point integer
)
RETURNS jsonb
AS $$
BEGIN
RETURN event_meta(NULL, sequence, point);
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION event_meta (integer, integer) IS
'Overload of event_meta (timestamptz, integer, integer) for use when searching by sequence / point.';

FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();

CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.8"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.8"}' WHERE public.info.key = 'version';


CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--

@@ -0,0 +1,229 @@
-- Fix not being able to edit a time-based event.
--
-- New schema version: 0.3.9
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This defines a replace_placeholders() function, taking as arguments
-- a text string and either a timestamp or a sequence / point pair. It
-- uses the latter arguments to find metadata from which it can extract
-- relevant information and replace it into the text string wherever the
-- appropriate placeholders appear. For instance, given a call such as
-- replace_placeholders('The position is @POS@', NULL, 11, 2600) it will
-- replace '@POS@' with the position of point 2600 in sequence 11, if it
-- exists (or leave the placeholder untouched otherwise).
--
-- A scan_placeholders() procedure is also defined, which calls the above
-- function on the entire event log.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
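-- A minimal usage sketch (sequence 11 / point 2600 are the hypothetical
-- values from the example above; the JSONPath key is likewise illustrative):
--
--   SELECT replace_placeholders('SOL @POS@, speed @BSP@ kn', NULL, 11, 2600);
--   SELECT replace_placeholders('Water depth: @$.waterDepth@@ m', NULL, 11, 2600);
--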

BEGIN;

CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN

RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);

CREATE OR REPLACE FUNCTION replace_placeholders (
text_in text, tstamp timestamptz, sequence integer, point integer
)
RETURNS text
AS $$
DECLARE
position geometry;
metadata jsonb;
text_out text;

json_query text;
json_result jsonb;
expect_recursion boolean := false;
BEGIN

text_out := text_in;

-- We only get a position if we are going to need it…
IF regexp_match(text_out, '@DMS@|@POS@|@DEG@') IS NOT NULL THEN
position := ST_Transform(event_position(tstamp, sequence, point), 4326);
END IF;

-- …and likewise with the metadata.
IF regexp_match(text_out, '@BSP@|@WD@|@CMG@|@EN@|@GRID@|@(\$\..*?)@@') IS NOT NULL THEN
metadata := event_meta(tstamp, sequence, point);
END IF;

-- We shortcut the evaluation if neither of the above regexps matched
IF position IS NULL AND metadata IS NULL THEN
RETURN text_out;
END IF;

IF position('@DMS@' IN text_out) != 0 THEN
text_out := replace(text_out, '@DMS@', ST_AsLatLonText(position));
END IF;

IF position('@POS@' IN text_out) != 0 THEN
text_out := replace(text_out, '@POS@', replace(ST_AsLatLonText(position, 'D.DDDDDD'), ' ', ', '));
END IF;

IF position('@DEG@' IN text_out) != 0 THEN
text_out := replace(text_out, '@DEG@', replace(ST_AsLatLonText(position, 'D.DDDDDD'), ' ', ', '));
END IF;

IF position('@EN@' IN text_out) != 0 THEN
IF metadata ? 'easting' AND metadata ? 'northing' THEN
text_out := replace(text_out, '@EN@', (metadata->>'easting') || ', ' || (metadata->>'northing'));
END IF;
END IF;

IF position('@GRID@' IN text_out) != 0 THEN
IF metadata ? 'easting' AND metadata ? 'northing' THEN
text_out := replace(text_out, '@GRID@', (metadata->>'easting') || ', ' || (metadata->>'northing'));
END IF;
END IF;

IF position('@CMG@' IN text_out) != 0 THEN
IF metadata ? 'bearing' THEN
text_out := replace(text_out, '@CMG@', metadata->>'bearing');
END IF;
END IF;

IF position('@BSP@' IN text_out) != 0 THEN
IF metadata ? 'speed' THEN
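-- The stored speed is assumed to be in m/s; multiplying by 3600 / 1852
-- converts it to knots (1 kn = 1852 m per hour).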
text_out := replace(text_out, '@BSP@', round((metadata->>'speed')::numeric * 3600 / 1852, 1)::text);
END IF;
END IF;

IF position('@WD@' IN text_out) != 0 THEN
IF metadata ? 'waterDepth' THEN
text_out := replace(text_out, '@WD@', metadata->>'waterDepth');
END IF;
END IF;

json_query := (regexp_match(text_out, '@(\$\..*?)@@'))[1];
IF json_query IS NOT NULL THEN
json_result := jsonb_path_query_array(metadata, json_query::jsonpath);
IF jsonb_array_length(json_result) = 1 THEN
text_out := replace(text_out, '@'||json_query||'@@', json_result->>0);
ELSE
text_out := replace(text_out, '@'||json_query||'@@', json_result::text);
END IF;
-- There might be multiple JSONPath queries, so we may have to recurse
expect_recursion := true;
END IF;

IF expect_recursion IS TRUE AND text_in != text_out THEN
--RAISE NOTICE 'Recursing %', text_out;
-- We don't know if we have found all the JSONPath expressions,
-- so we do another pass.
RETURN replace_placeholders(text_out, tstamp, sequence, point);
ELSE
RETURN text_out;
END IF;

END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION replace_placeholders (text, timestamptz, integer, integer) IS
'Replace certain placeholder strings in the input text with data obtained from shot or real-time data.';


CREATE OR REPLACE PROCEDURE scan_placeholders ()
LANGUAGE sql
AS $$
-- We update non-read-only events via the event_log view to leave a trace
-- of the fact that placeholders were replaced (and when).
-- Note that this will not replace placeholders in old edits.
UPDATE event_log
SET remarks = replace_placeholders(remarks, tstamp, sequence, point)
FROM (
SELECT id
FROM event_log e
WHERE
(meta->'readonly')::boolean IS NOT TRUE AND (
regexp_match(remarks, '@DMS@|@POS@|@DEG@') IS NOT NULL OR
regexp_match(remarks, '@BSP@|@WD@|@CMG@|@EN@|@GRID@|@(\$\..*?)@@') IS NOT NULL
)
) t
WHERE event_log.id = t.id;

-- And then we update read-only events directly on the event_log_full table
-- (as of this version of the schema we're prevented from updating read-only
-- events via event_log anyway).
UPDATE event_log_full
SET remarks = replace_placeholders(remarks, tstamp, sequence, point)
FROM (
SELECT uid
FROM event_log_full e
WHERE
(meta->'readonly')::boolean IS TRUE AND (
regexp_match(remarks, '@DMS@|@POS@|@DEG@') IS NOT NULL OR
regexp_match(remarks, '@BSP@|@WD@|@CMG@|@EN@|@GRID@|@(\$\..*?)@@') IS NOT NULL
)
) t
WHERE event_log_full.uid = t.uid;
$$;

COMMENT ON PROCEDURE scan_placeholders () IS
'Run replace_placeholders() on the entire event log.';

END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
BEGIN

FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();

CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.9"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.9"}' WHERE public.info.key = 'version';


CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--

@@ -0,0 +1,127 @@
-- Fix not being able to edit a time-based event.
--
-- New schema version: 0.3.10
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects only the public schema.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This defines an interpolate_geometry_from_tstamp() function, taking a
-- timestamp and a maximum timespan in seconds. It will then interpolate
-- a position at the exact timestamp based on data from real_time_inputs,
-- provided that the effective interpolation timespan does not exceed the
-- maximum requested.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
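-- A minimal usage sketch (the timestamp and the 600 s maximum timespan
-- are hypothetical):
--
--   SELECT public.interpolate_geometry_from_tstamp('2024-01-01 12:00:00+00', 600);
--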

BEGIN;

CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
BEGIN

CALL pg_temp.show_notice('Defining interpolate_geometry_from_tstamp()');

CREATE OR REPLACE FUNCTION public.interpolate_geometry_from_tstamp(
IN ts timestamptz,
IN maxspan numeric
)
RETURNS geometry
AS $$
DECLARE
ts0 timestamptz;
ts1 timestamptz;
geom0 geometry;
geom1 geometry;
span numeric;
fraction numeric;
BEGIN

SELECT tstamp, geometry
INTO ts0, geom0
FROM real_time_inputs
WHERE tstamp <= ts
ORDER BY tstamp DESC
LIMIT 1;

SELECT tstamp, geometry
INTO ts1, geom1
FROM real_time_inputs
WHERE tstamp >= ts
ORDER BY tstamp ASC
LIMIT 1;

IF geom0 IS NULL OR geom1 IS NULL THEN
RAISE NOTICE 'Interpolation failed (no straddling data)';
RETURN NULL;
END IF;

-- See if we got an exact match
IF ts0 = ts THEN
RETURN geom0;
ELSIF ts1 = ts THEN
RETURN geom1;
END IF;

span := extract('epoch' FROM ts1 - ts0);

IF span > maxspan THEN
RAISE NOTICE 'Interpolation timespan % outside maximum requested (%)', span, maxspan;
RETURN NULL;
END IF;

fraction := extract('epoch' FROM ts - ts0) / span;
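-- E.g. (hypothetical): ts0 = 12:00:00, ts1 = 12:00:10 and ts = 12:00:04
-- give span = 10 s and fraction = 0.4.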

IF fraction < 0 OR fraction > 1 THEN
RAISE NOTICE 'Requested timestamp % outside of interpolation span (fraction: %)', ts, fraction;
RETURN NULL;
END IF;

RETURN ST_LineInterpolatePoint(ST_MakeLine(geom0, geom1), fraction);

END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION public.interpolate_geometry_from_tstamp(timestamptz, numeric) IS
'Interpolate a position over a given maximum timespan (in seconds)
based on real-time inputs. Returns a POINT geometry.';


END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade ();

CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.10"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.10"}' WHERE public.info.key = 'version';


CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--

@@ -0,0 +1,149 @@
-- Fix not being able to edit a time-based event.
--
-- New schema version: 0.3.11
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This redefines augment_event_data() to use interpolation rather than
-- nearest neighbour. It now takes an argument indicating the maximum
-- allowed interpolation timespan. An overload with a default of ten
-- minutes is also provided, as an in situ replacement for the previous
-- version.
--
-- The ten-minute default is based on Triggerfish headers behaviour seen
-- on crew 248 during soft starts.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
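-- A minimal usage sketch:
--
--   CALL augment_event_data();     -- default 600 s maximum timespan
--   CALL augment_event_data(120);  -- stricter limit (hypothetical value)
--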

BEGIN;

CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN

RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);

CREATE OR REPLACE PROCEDURE augment_event_data (maxspan numeric)
LANGUAGE sql
AS $$
-- Populate the timestamp of sequence / point events
UPDATE event_log_full
SET tstamp = tstamp_from_sequence_shot(sequence, point)
WHERE
tstamp IS NULL AND sequence IS NOT NULL AND point IS NOT NULL;

-- Populate the geometry of sequence / point events for which
-- there is raw_shots data.
UPDATE event_log_full
SET meta = meta ||
jsonb_build_object(
'geometry',
(
SELECT st_transform(geometry, 4326)::jsonb
FROM raw_shots rs
WHERE rs.sequence = event_log_full.sequence AND rs.point = event_log_full.point
)
)
WHERE
sequence IS NOT NULL AND point IS NOT NULL AND
NOT meta ? 'geometry';

-- Populate the geometry of time-based events
UPDATE event_log_full e
SET
meta = meta || jsonb_build_object('geometry',
(SELECT st_transform(g.geometry, 4326)::jsonb
FROM interpolate_geometry_from_tstamp(e.tstamp, maxspan) g))
WHERE
tstamp IS NOT NULL AND
sequence IS NULL AND point IS NULL AND
NOT meta ? 'geometry';

-- Get rid of null geometries
UPDATE event_log_full
SET
meta = meta - 'geometry'
WHERE
jsonb_typeof(meta->'geometry') = 'null';

-- Simplify the GeoJSON when the CRS is EPSG:4326
UPDATE event_log_full
SET
meta = meta #- '{geometry, crs}'
WHERE
meta->'geometry'->'crs'->'properties'->>'name' = 'EPSG:4326';

$$;

COMMENT ON PROCEDURE augment_event_data(numeric)
IS 'Populate missing timestamps and geometries in event_log_full';

CREATE OR REPLACE PROCEDURE augment_event_data ()
LANGUAGE sql
AS $$
CALL augment_event_data(600);
$$;

COMMENT ON PROCEDURE augment_event_data()
IS 'Overload of augment_event_data(maxspan numeric) with a maxspan value of 600 seconds.';

END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
BEGIN

FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();

CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.11"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.11"}' WHERE public.info.key = 'version';


CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--

@@ -0,0 +1,193 @@
-- Add a midnight_shots view and a log_midnight_shots() procedure.
--
-- New schema version: 0.3.12
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This defines a midnight_shots view and a log_midnight_shots() procedure
-- (with some overloads). The view returns all points straddling midnight
-- UTC and belonging to the same sequence (so last shot of the day and
-- first shot of the next day).
--
-- The procedure inserts the corresponding events (optionally constrained
-- by an earliest and a latest date) in the event log, unless the events
-- already exist.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
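-- A minimal usage sketch (dates are hypothetical):
--
--   CALL log_midnight_shots();                           -- whole project
--   CALL log_midnight_shots('2021-01-01');               -- from a date onwards
--   CALL log_midnight_shots('2021-01-01', '2021-03-31'); -- within a date range
--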

BEGIN;

CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN

RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);

CREATE OR REPLACE VIEW midnight_shots AS
WITH straddlers AS (
-- Get sequence numbers straddling midnight UTC
SELECT sequence
FROM final_shots
GROUP BY sequence
HAVING min(date(tstamp)) != max(date(tstamp))
),
ts AS (
-- Get earliest and latest timestamps for each day
-- for each of the above sequences.
-- This will return the timestamps for:
-- FSP, LDSP, FDSP, LSP.
SELECT
fs.sequence,
min(fs.tstamp) AS ts0,
max(fs.tstamp) AS ts1
FROM final_shots fs INNER JOIN straddlers USING (sequence)
GROUP BY fs.sequence, (date(fs.tstamp))
ORDER BY fs.sequence, date(fs.tstamp)
),
spts AS (
-- Filter out FSP, LSP from the above.
-- NOTE: This *should* in theory be able to cope with
-- a sequence longer than 24 hours (so with more than
-- one LDSP, FDSP) but that hasn't been tested.
SELECT DISTINCT
sequence,
min(ts1) OVER (PARTITION BY sequence) ldsp,
max(ts0) OVER (PARTITION BY sequence) fdsp
FROM ts
ORDER BY sequence
), evt AS (
SELECT
fs.tstamp,
fs.sequence,
point,
'Last shotpoint of the day' remarks,
'{LDSP}'::text[] labels
FROM final_shots fs
INNER JOIN spts ON fs.sequence = spts.sequence AND fs.tstamp = spts.ldsp
UNION SELECT
fs.tstamp,
fs.sequence,
point,
'First shotpoint of the day' remarks,
'{FDSP}'::text[] labels
FROM final_shots fs
INNER JOIN spts ON fs.sequence = spts.sequence AND fs.tstamp = spts.fdsp
ORDER BY tstamp
)
SELECT * FROM evt;


CREATE OR REPLACE PROCEDURE log_midnight_shots (dt0 date, dt1 date)
LANGUAGE sql
AS $$
INSERT INTO event_log (sequence, point, remarks, labels, meta)
SELECT
sequence, point, remarks, labels,
'{"auto": true, "insertedBy": "log_midnight_shots"}'::jsonb
FROM midnight_shots ms
WHERE
(dt0 IS NULL OR ms.tstamp >= dt0) AND
(dt1 IS NULL OR ms.tstamp <= dt1) AND
NOT EXISTS (
SELECT 1
FROM event_log el
WHERE ms.sequence = el.sequence AND ms.point = el.point AND el.labels @> ms.labels
);

-- Delete any midnight shots that might have been inserted in the log
-- but are no longer relevant according to the final_shots data.
-- We operate on event_log, so the deletion is traceable.
DELETE
FROM event_log
WHERE id IN (
SELECT id
FROM event_log el
LEFT JOIN midnight_shots ms USING (sequence, point)
WHERE
'{LDSP,FDSP}'::text[] && el.labels -- &&: Do the arrays overlap?
AND ms.sequence IS NULL
);
$$;

COMMENT ON PROCEDURE log_midnight_shots (date, date)
IS 'Add midnight shots between two dates dt0 and dt1 to the event_log, unless the events already exist.';


CREATE OR REPLACE PROCEDURE log_midnight_shots (dt0 date)
LANGUAGE sql
AS $$
CALL log_midnight_shots(dt0, NULL);
$$;

COMMENT ON PROCEDURE log_midnight_shots (date)
IS 'Overload taking only a dt0 (adds events on that date or after).';

CREATE OR REPLACE PROCEDURE log_midnight_shots ()
LANGUAGE sql
AS $$
CALL log_midnight_shots(NULL, NULL);
$$;

COMMENT ON PROCEDURE log_midnight_shots ()
IS 'Overload taking no arguments (adds all missing events).';

END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
BEGIN

FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();

CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.12"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.12"}' WHERE public.info.key = 'version';


CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--

etc/db/upgrades/upgrade26-v0.3.13-fix-missing-shots-summary.sql (new file, 162 lines)
@@ -0,0 +1,162 @@
-- Fix wrong number of missing shots in summary views
--
-- New schema version: 0.3.13
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- Fixes a bug in the `final_lines_summary` and `raw_lines_summary` views
-- which results in the number of missing shots being miscounted on jobs
-- using three sources.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
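-- A quick way to eyeball the corrected counts afterwards (a sketch, run
-- within a survey schema):
--
--   SELECT sequence, line, num_points, missing_shots FROM raw_lines_summary;
--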

BEGIN;

CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN

RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);


CREATE OR REPLACE VIEW raw_lines_summary AS
WITH summary AS (
SELECT DISTINCT rs.sequence,
first_value(rs.point) OVER w AS fsp,
last_value(rs.point) OVER w AS lsp,
first_value(rs.tstamp) OVER w AS ts0,
last_value(rs.tstamp) OVER w AS ts1,
count(rs.point) OVER w AS num_points,
count(pp.point) OVER w AS num_preplots,
public.st_distance(first_value(rs.geometry) OVER w, last_value(rs.geometry) OVER w) AS length,
((public.st_azimuth(first_value(rs.geometry) OVER w, last_value(rs.geometry) OVER w) * (180)::double precision) / pi()) AS azimuth
FROM (raw_shots rs
LEFT JOIN preplot_points pp USING (line, point))
WINDOW w AS (PARTITION BY rs.sequence ORDER BY rs.tstamp ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
)
SELECT rl.sequence,
rl.line,
s.fsp,
s.lsp,
s.ts0,
s.ts1,
(s.ts1 - s.ts0) AS duration,
s.num_points,
s.num_preplots,
(SELECT count(*) AS count
FROM missing_sequence_raw_points
WHERE missing_sequence_raw_points.sequence = s.sequence) AS missing_shots,
s.length,
s.azimuth,
rl.remarks,
rl.ntbp,
rl.meta
FROM (summary s
JOIN raw_lines rl USING (sequence));


CREATE OR REPLACE VIEW final_lines_summary AS
WITH summary AS (
SELECT DISTINCT fs.sequence,
first_value(fs.point) OVER w AS fsp,
last_value(fs.point) OVER w AS lsp,
first_value(fs.tstamp) OVER w AS ts0,
last_value(fs.tstamp) OVER w AS ts1,
count(fs.point) OVER w AS num_points,
public.st_distance(first_value(fs.geometry) OVER w, last_value(fs.geometry) OVER w) AS length,
((public.st_azimuth(first_value(fs.geometry) OVER w, last_value(fs.geometry) OVER w) * (180)::double precision) / pi()) AS azimuth
FROM final_shots fs
WINDOW w AS (PARTITION BY fs.sequence ORDER BY fs.tstamp ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
)
SELECT fl.sequence,
fl.line,
s.fsp,
s.lsp,
s.ts0,
s.ts1,
(s.ts1 - s.ts0) AS duration,
s.num_points,
( SELECT count(*) AS count
FROM missing_sequence_final_points
WHERE missing_sequence_final_points.sequence = s.sequence) AS missing_shots,
s.length,
s.azimuth,
fl.remarks,
fl.meta
FROM (summary s
JOIN final_lines fl USING (sequence));

END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN

SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';

IF current_db_version >= '0.3.13' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;

IF current_db_version != '0.3.12' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;

FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();

CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.13"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.13"}' WHERE public.info.key = 'version';


CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--

@@ -0,0 +1,122 @@
-- Add a project_configuration() function returning the current project's configuration data.
--
-- New schema version: 0.4.0
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This adapts the schema to the change in how project configurations are
-- handled (https://gitlab.com/wgp/dougal/software/-/merge_requests/29)
-- by creating a project_configuration() function which returns the
-- current project's configuration data.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
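-- A minimal usage sketch:
--
--   SELECT project_configuration();             -- the whole configuration object
--   SELECT project_configuration()->'planner';  -- a single section of it
--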

BEGIN;

CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN

RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);

CREATE OR REPLACE FUNCTION project_configuration()
RETURNS jsonb
LANGUAGE plpgsql
AS $$
DECLARE
schema_name text;
configuration jsonb;
BEGIN
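-- Find the schema this particular copy of project_configuration() was
-- created in; that schema name identifies the current project.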

SELECT nspname
INTO schema_name
FROM pg_namespace
WHERE oid = (
SELECT pronamespace
FROM pg_proc
WHERE oid = 'project_configuration'::regproc::oid
);

SELECT meta
INTO configuration
FROM public.projects
WHERE schema = schema_name;

RETURN configuration;
END
$$;

END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN

SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';

IF current_db_version >= '0.4.0' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;

IF current_db_version != '0.3.12' AND current_db_version != '0.3.13' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;

FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();

CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.4.0"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.4.0"}' WHERE public.info.key = 'version';


CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--

@@ -0,0 +1,264 @@
-- Modify adjust_planner() to use project_configuration().
--
-- New schema version: 0.4.1
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This modifies adjust_planner() to use project_configuration()
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
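-- A minimal usage sketch: the procedure takes no arguments and reads its
-- settings (e.g. defaultLineChangeDuration) from project_configuration():
--
--   CALL adjust_planner();
--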

BEGIN;

CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN

RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);


CREATE OR REPLACE PROCEDURE adjust_planner()
LANGUAGE plpgsql
AS $$
DECLARE
_planner_config jsonb;
_planned_line planned_lines%ROWTYPE;
_lag interval;
_last_sequence sequences_summary%ROWTYPE;
_deltatime interval;
_shotinterval interval;
_tstamp timestamptz;
_incr integer;
BEGIN

SET CONSTRAINTS planned_lines_pkey DEFERRED;

SELECT project_configuration()->'planner'
INTO _planner_config;

SELECT *
INTO _last_sequence
FROM sequences_summary
ORDER BY sequence DESC
LIMIT 1;

SELECT *
INTO _planned_line
FROM planned_lines
WHERE sequence = _last_sequence.sequence AND line = _last_sequence.line;

SELECT
COALESCE(
((lead(ts0) OVER (ORDER BY sequence)) - ts1),
make_interval(mins => (_planner_config->>'defaultLineChangeDuration')::integer)
)
INTO _lag
FROM planned_lines
WHERE sequence = _last_sequence.sequence AND line = _last_sequence.line;

_incr = sign(_last_sequence.lsp - _last_sequence.fsp);

RAISE NOTICE '_planner_config: %', _planner_config;
RAISE NOTICE '_last_sequence: %', _last_sequence;
RAISE NOTICE '_planned_line: %', _planned_line;
RAISE NOTICE '_incr: %', _incr;

-- Does the latest sequence match a planned sequence?
IF _planned_line IS NULL THEN -- No it doesn't
RAISE NOTICE 'Latest sequence shot does not match a planned sequence';
SELECT * INTO _planned_line FROM planned_lines ORDER BY sequence ASC LIMIT 1;
RAISE NOTICE '_planned_line: %', _planned_line;

IF _planned_line.sequence <= _last_sequence.sequence THEN
RAISE NOTICE 'Renumbering the planned sequences starting from %', _planned_line.sequence + 1;
-- Renumber the planned sequences starting from last shot sequence number + 1
UPDATE planned_lines
SET sequence = sequence + _last_sequence.sequence - _planned_line.sequence + 1;
END IF;

-- The correction to make to the first planned line's ts0 will be based on either the last
-- sequence's EOL + default line change time or the current time, whichever is later.
_deltatime := GREATEST(COALESCE(_last_sequence.ts1_final, _last_sequence.ts1) + make_interval(mins => (_planner_config->>'defaultLineChangeDuration')::integer), current_timestamp) - _planned_line.ts0;

-- Is the first planned line's start time in the past? (±5 mins)
IF _planned_line.ts0 < (current_timestamp - make_interval(mins => 5)) THEN
RAISE NOTICE 'First planned line is in the past. Adjusting times by %', _deltatime;
-- Adjust the start / end time of the planned lines by assuming that we are at
-- `defaultLineChangeDuration` minutes away from SOL of the first planned line.
UPDATE planned_lines
SET
ts0 = ts0 + _deltatime,
ts1 = ts1 + _deltatime;
END IF;

ELSE -- Yes it does
RAISE NOTICE 'Latest sequence does match a planned sequence: %, %', _planned_line.sequence, _planned_line.line;

-- Is it online?
IF EXISTS(SELECT 1 FROM raw_lines_files WHERE sequence = _last_sequence.sequence AND hash = '*online*') THEN
-- Yes it is
RAISE NOTICE 'Sequence % is online', _last_sequence.sequence;

-- Let us get the SOL from the events log if we can
RAISE NOTICE 'Trying to set fsp, ts0 from events log FSP, FGSP';
WITH e AS (
SELECT * FROM event_log
WHERE
sequence = _last_sequence.sequence
AND ('FSP' = ANY(labels) OR 'FGSP' = ANY(labels))
ORDER BY tstamp LIMIT 1
)
UPDATE planned_lines
SET
fsp = COALESCE(e.point, fsp),
ts0 = COALESCE(e.tstamp, ts0)
FROM e
WHERE planned_lines.sequence = _last_sequence.sequence;

-- Shot interval
_shotinterval := (_last_sequence.ts1 - _last_sequence.ts0) / abs(_last_sequence.lsp - _last_sequence.fsp);

RAISE NOTICE 'Estimating EOL from current shot interval: %', _shotinterval;

SELECT (abs(lsp-fsp) * _shotinterval + ts0) - ts1
INTO _deltatime
FROM planned_lines
WHERE sequence = _last_sequence.sequence;

---- Set ts1 for the current sequence
--UPDATE planned_lines
--SET
--ts1 = (abs(lsp-fsp) * _shotinterval) + ts0
--WHERE sequence = _last_sequence.sequence;

RAISE NOTICE 'Adjustment is %', _deltatime;

IF abs(EXTRACT(EPOCH FROM _deltatime)) < 8 THEN
RAISE NOTICE 'Adjustment too small (< 8 s), so not applying it';
RETURN;
END IF;

-- Adjust ts1 for the current sequence
UPDATE planned_lines
SET ts1 = ts1 + _deltatime
WHERE sequence = _last_sequence.sequence;

-- Now shift all sequences after
UPDATE planned_lines
SET ts0 = ts0 + _deltatime, ts1 = ts1 + _deltatime
WHERE sequence > _last_sequence.sequence;

RAISE NOTICE 'Deleting planned sequences before %', _planned_line.sequence;
-- Remove all previous planner entries.
DELETE
FROM planned_lines
WHERE sequence < _last_sequence.sequence;

ELSE
-- No it isn't
RAISE NOTICE 'Sequence % is offline', _last_sequence.sequence;

-- We were supposed to finish at _planned_line.ts1 but we finished at:
_tstamp := GREATEST(COALESCE(_last_sequence.ts1_final, _last_sequence.ts1), current_timestamp);
-- WARNING Next line is for testing only
--_tstamp := COALESCE(_last_sequence.ts1_final, _last_sequence.ts1);
-- So we need to adjust timestamps by:
_deltatime := _tstamp - _planned_line.ts1;

RAISE NOTICE 'Planned end: %, actual end: % (%, %)', _planned_line.ts1, _tstamp, _planned_line.sequence, _last_sequence.sequence;
RAISE NOTICE 'Shifting times by % for sequences > %', _deltatime, _planned_line.sequence;
-- NOTE: This won't work if sequences are not, err… sequential.
-- NOTE: This has been known to happen in 2020.
UPDATE planned_lines
SET
ts0 = ts0 + _deltatime,
ts1 = ts1 + _deltatime
WHERE sequence > _planned_line.sequence;

RAISE NOTICE 'Deleting planned sequences up to %', _planned_line.sequence;
-- Remove all previous planner entries.
DELETE
FROM planned_lines
WHERE sequence <= _last_sequence.sequence;

END IF;

END IF;
END;
$$;


END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN

SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';

IF current_db_version >= '0.4.1' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;

IF current_db_version != '0.4.0' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;

FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();

CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.4.1"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.4.1"}' WHERE public.info.key = 'version';


CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--

@@ -0,0 +1,98 @@
-- Modify binning_parameters() to use project_configuration().
--
-- New schema version: 0.4.2
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This modifies binning_parameters() to use project_configuration()
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
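-- A minimal usage sketch:
--
--   SELECT binning_parameters();  -- now simply project_configuration()->'binning'
--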

BEGIN;

CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN

RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);

CREATE OR REPLACE FUNCTION binning_parameters() RETURNS jsonb
LANGUAGE sql STABLE LEAKPROOF PARALLEL SAFE
AS $$
SELECT project_configuration()->'binning' binning;
$$;

END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN

SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';

IF current_db_version >= '0.4.2' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;

IF current_db_version != '0.4.1' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;

FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();

CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.4.2"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.4.2"}' WHERE public.info.key = 'version';


CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
|
||||
164  etc/db/upgrades/upgrade30-v0.4.3-large-notification-payloads.sql  Normal file
@@ -0,0 +1,164 @@
-- Support notification payloads larger than Postgres' NOTIFY limit.
--
-- New schema version: 0.4.3
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects the public schema only.
-- NOTE: Each application starts a transaction, which must be committed
--       or rolled back.
--
-- This creates a new table where large notification payloads are stored
-- temporarily and from which they might be recalled by the notification
-- listeners. It also creates a purge_notifications() procedure used to
-- clean up old notifications from the notifications log and, finally,
-- modifies notify() to support these changes. When a large payload is
-- encountered, the payload is stored in the notify_payloads table and
-- a trimmed-down version containing a notification_id is sent to listeners
-- instead. Listeners can then query notify_payloads to retrieve the full
-- payloads. It is the application layer's responsibility to delete old
-- notifications.
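--
-- Example (illustrative only, not part of this upgrade; the id is made
-- up): a listener that receives a trimmed-down notification such as
-- '{"payload_id": 42, ...}' on its channel can recover the full payload
-- with:
--
--   SELECT payload FROM public.notify_payloads WHERE id = 42;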
--
-- To apply, run as the dougal user:
--
--   psql <<EOF
--   \i $THIS_FILE
--   COMMIT;
--   EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--

BEGIN;

CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
  RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_schema () AS $outer$
BEGIN

  RAISE NOTICE 'Updating public schema';
  -- We need to set the search path because some of the trigger
  -- functions reference other tables in survey schemas assuming
  -- they are in the search path.
  EXECUTE format('SET search_path TO public');

  CREATE TABLE IF NOT EXISTS public.notify_payloads (
    id SERIAL,
    tstamp timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP,
    payload text NOT NULL DEFAULT '',
    PRIMARY KEY (id)
  );

  CREATE INDEX IF NOT EXISTS notify_payload_tstamp ON notify_payloads (tstamp);

  CREATE OR REPLACE FUNCTION public.notify() RETURNS trigger
  LANGUAGE plpgsql
  AS $$
  DECLARE
    channel text := TG_ARGV[0];
    pid text;
    payload text;
    notification text;
    payload_id integer;
  BEGIN

    SELECT projects.pid INTO pid FROM projects WHERE schema = TG_TABLE_SCHEMA;

    payload := json_build_object(
      'tstamp', CURRENT_TIMESTAMP,
      'operation', TG_OP,
      'schema', TG_TABLE_SCHEMA,
      'table', TG_TABLE_NAME,
      'old', row_to_json(OLD),
      'new', row_to_json(NEW),
      'pid', pid
    )::text;

    IF octet_length(payload) < 1000 THEN
      PERFORM pg_notify(channel, payload);
    ELSE
      -- The payload is too large to send directly. Store it in the
      -- notify_payloads table, keyed by an autogenerated ID, and send a
      -- trimmed-down notification carrying that ID instead; it is then up
      -- to the listener to fetch the original payload if interested.
      -- Older payloads must be expired in the interest of conserving
      -- memory (see purge_notifications below).

      INSERT INTO notify_payloads (payload) VALUES (payload) RETURNING id INTO payload_id;

      notification := json_build_object(
        'tstamp', CURRENT_TIMESTAMP,
        'operation', TG_OP,
        'schema', TG_TABLE_SCHEMA,
        'table', TG_TABLE_NAME,
        'pid', pid,
        'payload_id', payload_id
      )::text;

      PERFORM pg_notify(channel, notification);
      RAISE INFO 'Payload over limit';
    END IF;
    RETURN NULL;
  END;
  $$;

  CREATE PROCEDURE public.purge_notifications (age_seconds numeric DEFAULT 120) AS $$
    DELETE FROM notify_payloads WHERE EXTRACT(epoch FROM CURRENT_TIMESTAMP - tstamp) > age_seconds;
  $$ LANGUAGE sql;
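
  -- Example (illustrative only, not part of this upgrade): purge payloads
  -- older than five minutes instead of the default two:
  --
  --   CALL public.purge_notifications(300);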

END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
  row RECORD;
  current_db_version TEXT;
BEGIN

  SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';

  IF current_db_version >= '0.4.3' THEN
    RAISE EXCEPTION
      USING MESSAGE='Patch already applied';
  END IF;

  IF current_db_version != '0.4.2' THEN
    RAISE EXCEPTION
      USING MESSAGE='Invalid database version: ' || current_db_version,
      HINT='Ensure all previous patches have been applied.';
  END IF;

  -- This upgrade modifies the `public` schema only, not individual
  -- project schemas.
  CALL pg_temp.upgrade_schema();

END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_schema ();
DROP PROCEDURE pg_temp.upgrade ();

CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.4.3"}')
ON CONFLICT (key) DO UPDATE
  SET value = public.info.value || '{"db_schema": "0.4.3"}' WHERE public.info.key = 'version';

CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
@@ -0,0 +1,104 @@
-- Add event_log_changes function
--
-- New schema version: 0.4.4
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
--       or rolled back.
--
-- This adds a function event_log_changes which returns the subset of
-- events from event_log_full which have been modified after a given
-- timestamp.
--
-- To apply, run as the dougal user:
--
--   psql <<EOF
--   \i $THIS_FILE
--   COMMIT;
--   EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--

BEGIN;

CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
  RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN

  RAISE NOTICE 'Updating schema %', schema_name;
  -- We need to set the search path because some of the trigger
  -- functions reference other tables in survey schemas assuming
  -- they are in the search path.
  EXECUTE format('SET search_path TO %I,public', schema_name);

  CREATE OR REPLACE FUNCTION event_log_changes(ts0 timestamptz)
  RETURNS SETOF event_log_full
  LANGUAGE sql
  AS $$
    SELECT *
    FROM event_log_full
    WHERE lower(validity) > ts0 OR (upper(validity) IS NOT NULL AND upper(validity) > ts0)
    ORDER BY lower(validity);
  $$;
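
  -- Example (illustrative only, not part of this upgrade): fetch events
  -- modified in the last hour, in order of validity:
  --
  --   SELECT * FROM event_log_changes(now() - interval '1 hour');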

END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
  row RECORD;
  current_db_version TEXT;
BEGIN

  SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';

  IF current_db_version >= '0.4.4' THEN
    RAISE EXCEPTION
      USING MESSAGE='Patch already applied';
  END IF;

  IF current_db_version != '0.4.3' THEN
    RAISE EXCEPTION
      USING MESSAGE='Invalid database version: ' || current_db_version,
      HINT='Ensure all previous patches have been applied.';
  END IF;

  FOR row IN
    SELECT schema_name FROM information_schema.schemata
    WHERE schema_name LIKE 'survey_%'
    ORDER BY schema_name
  LOOP
    CALL pg_temp.upgrade_survey_schema(row.schema_name);
  END LOOP;
END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();

CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.4.4"}')
ON CONFLICT (key) DO UPDATE
  SET value = public.info.value || '{"db_schema": "0.4.4"}' WHERE public.info.key = 'version';

CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
@@ -0,0 +1,147 @@
-- Turn project_summary into a materialised view
--
-- New schema version: 0.4.5
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
--       or rolled back.
--
-- The project_summary view is quite a bottleneck. While it itself is
-- not the real culprit (rather the underlying views are), this is one
-- relatively cheap way of improving responsiveness from the client's
-- point of view.
-- We leave the details of how / when to refresh the view to the non-
-- database code.
--
-- To apply, run as the dougal user:
--
--   psql <<EOF
--   \i $THIS_FILE
--   COMMIT;
--   EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--

BEGIN;

CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
  RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN

  RAISE NOTICE 'Updating schema %', schema_name;
  -- We need to set the search path because some of the trigger
  -- functions reference other tables in survey schemas assuming
  -- they are in the search path.
  EXECUTE format('SET search_path TO %I,public', schema_name);

  DROP VIEW project_summary;

  CREATE MATERIALIZED VIEW project_summary AS
  WITH fls AS (
    SELECT
      avg((final_lines_summary.duration / ((final_lines_summary.num_points - 1))::double precision)) AS shooting_rate,
      avg((final_lines_summary.length / date_part('epoch'::text, final_lines_summary.duration))) AS speed,
      sum(final_lines_summary.duration) AS prod_duration,
      sum(final_lines_summary.length) AS prod_distance
    FROM final_lines_summary
  ), project AS (
    SELECT
      p.pid,
      p.name,
      p.schema
    FROM public.projects p
    WHERE (split_part(current_setting('search_path'::text), ','::text, 1) = p.schema)
  )
  SELECT
    project.pid,
    project.name,
    project.schema,
    ( SELECT count(*) AS count
      FROM preplot_lines
      WHERE (preplot_lines.class = 'V'::bpchar)) AS lines,
    ps.total,
    ps.virgin,
    ps.prime,
    ps.other,
    ps.ntba,
    ps.remaining,
    ( SELECT to_json(fs.*) AS to_json
      FROM final_shots fs
      ORDER BY fs.tstamp
      LIMIT 1) AS fsp,
    ( SELECT to_json(fs.*) AS to_json
      FROM final_shots fs
      ORDER BY fs.tstamp DESC
      LIMIT 1) AS lsp,
    ( SELECT count(*) AS count
      FROM raw_lines rl) AS seq_raw,
    ( SELECT count(*) AS count
      FROM final_lines rl) AS seq_final,
    fls.prod_duration,
    fls.prod_distance,
    fls.speed AS shooting_rate
  FROM preplot_summary ps,
    fls,
    project;
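
  -- Note (illustrative only, not part of this upgrade): as the header
  -- says, refreshing is left to the non-database code, which would run
  -- something like:
  --
  --   REFRESH MATERIALIZED VIEW project_summary;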

END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
  row RECORD;
  current_db_version TEXT;
BEGIN

  SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';

  IF current_db_version >= '0.4.5' THEN
    RAISE EXCEPTION
      USING MESSAGE='Patch already applied';
  END IF;

  IF current_db_version != '0.4.4' THEN
    RAISE EXCEPTION
      USING MESSAGE='Invalid database version: ' || current_db_version,
      HINT='Ensure all previous patches have been applied.';
  END IF;

  FOR row IN
    SELECT schema_name FROM information_schema.schemata
    WHERE schema_name LIKE 'survey_%'
    ORDER BY schema_name
  LOOP
    CALL pg_temp.upgrade_survey_schema(row.schema_name);
  END LOOP;
END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();

CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.4.5"}')
ON CONFLICT (key) DO UPDATE
  SET value = public.info.value || '{"db_schema": "0.4.5"}' WHERE public.info.key = 'version';

CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
164  etc/db/upgrades/upgrade33-v0.5.0-sailline-ancillary-data.sql  Normal file
@@ -0,0 +1,164 @@
-- Sailline ancillary data
--
-- New schema version: 0.5.0
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
--       or rolled back.
--
-- Issue #264 calls for associating sail and acquisition lines, as well
-- as indicating expected acquisition direction and other data which
-- cannot be provided via standard import formats such as SPS or P1/90.
--
-- We support this via an additional table that holds most of the required
-- data. This data can simply be inferred from regular preplots, e.g., line
-- direction can be deduced from preplot point order, and sail / source
-- line offsets can be taken from P1/90 headers or from a configuration
-- parameter. Alternatively, and in preference, the data can be provided
-- explicitly, which is what issue #264 asks for.
--
-- In principle, this makes at least some of the attributes of `preplot_lines`
-- redundant (at least `incr` and `ntba`) but we will leave them there for
-- the time being as technical debt.
--
-- To apply, run as the dougal user:
--
--   psql <<EOF
--   \i $THIS_FILE
--   COMMIT;
--   EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--

BEGIN;

CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
  RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN

  RAISE NOTICE 'Updating schema %', schema_name;
  -- We need to set the search path because some of the trigger
  -- functions reference other tables in survey schemas assuming
  -- they are in the search path.
  EXECUTE format('SET search_path TO %I,public', schema_name);

  CREATE TABLE IF NOT EXISTS preplot_saillines
  (
    sailline integer NOT NULL,
    line integer NOT NULL,
    sailline_class character(1) NOT NULL,
    line_class character(1) NOT NULL,
    incr boolean NOT NULL DEFAULT true,
    ntba boolean NOT NULL DEFAULT false,
    remarks text NOT NULL DEFAULT '',
    meta jsonb NOT NULL DEFAULT '{}'::jsonb,
    hash text NULL, -- Theoretically the info in this table could all be inferred.
    PRIMARY KEY (sailline, sailline_class, line, line_class, incr),
    CONSTRAINT fk_sailline FOREIGN KEY (sailline, sailline_class)
      REFERENCES preplot_lines (line, class)
      ON UPDATE CASCADE
      ON DELETE CASCADE,
    CONSTRAINT fk_line FOREIGN KEY (line, line_class)
      REFERENCES preplot_lines (line, class)
      ON UPDATE CASCADE
      ON DELETE CASCADE,
    CONSTRAINT fk_hash FOREIGN KEY (hash)
      REFERENCES files (hash) MATCH SIMPLE
      ON UPDATE CASCADE
      ON DELETE CASCADE,
    CHECK (sailline_class = 'V' AND sailline_class != line_class)
  );

  COMMENT ON TABLE preplot_saillines
    IS 'We explicitly associate each preplot sailline (aka vessel line) with zero or more source lines. This information can be inferred from preplot files, e.g., via a sailline offset value, or explicitly provided.';
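
  -- Example (illustrative only, not part of this upgrade; the line
  -- numbers are made up and must exist in preplot_lines for the foreign
  -- keys to hold): explicitly associate vessel sailline 1001 with source
  -- lines 1001 and 1002, both shot in increasing point order:
  --
  --   INSERT INTO preplot_saillines (sailline, sailline_class, line, line_class, incr)
  --   VALUES (1001, 'V', 1001, 'S', true), (1001, 'V', 1002, 'S', true);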

  -- Let us copy whatever information we can from existing tables or views

  INSERT INTO preplot_saillines
    (sailline, line, sailline_class, line_class, incr, ntba, remarks, meta)
  SELECT DISTINCT
    sailline, psp.line, 'V' sailline_class, psp.class line_class, pl.incr, pl.ntba, pl.remarks, pl.meta
  FROM preplot_saillines_points psp
  INNER JOIN preplot_lines pl ON psp.sailline = pl.line AND pl.class = 'V'
  ORDER BY sailline
  ON CONFLICT DO NOTHING;

  -- We need to recreate the preplot_saillines_points view

  CREATE OR REPLACE VIEW preplot_saillines_points AS
  SELECT psl.sailline,
    psl.ntba AS sailline_ntba,
    psl.line,
    pps.point,
    pps.class,
    pps.ntba,
    pps.geometry,
    pps.meta
  FROM preplot_saillines psl
  INNER JOIN preplot_points pps
    ON psl.line = pps.line AND psl.line_class = pps.class;

END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
  row RECORD;
  current_db_version TEXT;
BEGIN

  SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';

  IF current_db_version >= '0.5.0' THEN
    RAISE EXCEPTION
      USING MESSAGE='Patch already applied';
  END IF;

  IF current_db_version != '0.4.5' THEN
    RAISE EXCEPTION
      USING MESSAGE='Invalid database version: ' || current_db_version,
      HINT='Ensure all previous patches have been applied.';
  END IF;

  FOR row IN
    SELECT schema_name FROM information_schema.schemata
    WHERE schema_name LIKE 'survey_%'
    ORDER BY schema_name
  LOOP
    CALL pg_temp.upgrade_survey_schema(row.schema_name);
  END LOOP;
END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();

CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.5.0"}')
ON CONFLICT (key) DO UPDATE
  SET value = public.info.value || '{"db_schema": "0.5.0"}' WHERE public.info.key = 'version';

CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
119  etc/db/upgrades/upgrade34-v0.5.1-fix-sequences-detail-view.sql  Normal file
@@ -0,0 +1,119 @@
-- Fix sequences_detail view
--
-- New schema version: 0.5.1
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
--       or rolled back.
--
-- The sequences_detail view wrongly associates source lines and shot
-- points when it should be associating saillines and shot points instead.
--
-- This update fixes that issue (#307).
--
-- To apply, run as the dougal user:
--
--   psql <<EOF
--   \i $THIS_FILE
--   COMMIT;
--   EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--

BEGIN;

CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
  RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN

  RAISE NOTICE 'Updating schema %', schema_name;
  -- We need to set the search path because some of the trigger
  -- functions reference other tables in survey schemas assuming
  -- they are in the search path.
  EXECUTE format('SET search_path TO %I,public', schema_name);

  CREATE OR REPLACE VIEW sequences_detail
  AS
  SELECT rl.sequence,
    rl.line AS sailline,
    rs.line,
    rs.point,
    rs.tstamp,
    rs.objref AS objrefraw,
    fs.objref AS objreffinal,
    st_transform(pp.geometry, 4326) AS geometrypreplot,
    st_transform(rs.geometry, 4326) AS geometryraw,
    st_transform(fs.geometry, 4326) AS geometryfinal,
    ij_error(rs.line::double precision, rs.point::double precision, rs.geometry) AS errorraw,
    ij_error(rs.line::double precision, rs.point::double precision, fs.geometry) AS errorfinal,
    json_build_object('preplot', pp.meta, 'raw', rs.meta, 'final', fs.meta) AS meta
  FROM raw_lines rl
  INNER JOIN preplot_saillines psl ON rl.line = psl.sailline
  INNER JOIN raw_shots rs ON rs.sequence = rl.sequence AND rs.line = psl.line
  INNER JOIN preplot_points pp ON psl.line = pp.line AND psl.line_class = pp.class AND rs.point = pp.point
  LEFT JOIN final_shots fs ON rl.sequence = fs.sequence AND rs.point = fs.point;
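
  -- Example (illustrative only, not part of this upgrade; the sequence
  -- number is made up): inspect per-shot positioning errors for one
  -- sequence:
  --
  --   SELECT point, errorraw, errorfinal FROM sequences_detail
  --   WHERE sequence = 123 ORDER BY point;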

  ALTER TABLE sequences_detail
    OWNER TO postgres;

END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
  row RECORD;
  current_db_version TEXT;
BEGIN

  SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';

  IF current_db_version >= '0.5.1' THEN
    RAISE EXCEPTION
      USING MESSAGE='Patch already applied';
  END IF;

  IF current_db_version != '0.5.0' THEN
    RAISE EXCEPTION
      USING MESSAGE='Invalid database version: ' || current_db_version,
      HINT='Ensure all previous patches have been applied.';
  END IF;

  FOR row IN
    SELECT schema_name FROM information_schema.schemata
    WHERE schema_name LIKE 'survey_%'
    ORDER BY schema_name
  LOOP
    CALL pg_temp.upgrade_survey_schema(row.schema_name);
  END LOOP;
END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();

CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.5.1"}')
ON CONFLICT (key) DO UPDATE
  SET value = public.info.value || '{"db_schema": "0.5.1"}' WHERE public.info.key = 'version';

CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
@@ -0,0 +1,145 @@
-- Fix preplot_lines_summary view
--
-- New schema version: 0.5.2
--
-- WARNING: This update is buggy and does not give the desired
--          results. Schema version 0.5.4 fixes this.
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
--       or rolled back.
--
-- Following the introduction of `preplot_saillines` (0.5.0), the incr and
-- ntba statuses are stored in a separate table, not in `preplot_lines`
-- (TODO: a future upgrade should remove those columns from `preplot_lines`).
--
-- Now any views referencing `incr` and `ntba` must be updated to point to
-- the new location of those attributes.
--
-- This update fixes #312.
--
-- To apply, run as the dougal user:
--
--   psql <<EOF
--   \i $THIS_FILE
--   COMMIT;
--   EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--

BEGIN;

CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
  RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN

  RAISE NOTICE 'Updating schema %', schema_name;
  -- We need to set the search path because some of the trigger
  -- functions reference other tables in survey schemas assuming
  -- they are in the search path.
  EXECUTE format('SET search_path TO %I,public', schema_name);

  CREATE OR REPLACE VIEW preplot_lines_summary
  AS
  WITH summary AS (
    SELECT DISTINCT pp.line, pp.class,
      first_value(pp.point) OVER w AS p0,
      last_value(pp.point) OVER w AS p1,
      count(pp.point) OVER w AS num_points,
      st_distance(first_value(pp.geometry) OVER w, last_value(pp.geometry) OVER w) AS length,
      st_azimuth(first_value(pp.geometry) OVER w, last_value(pp.geometry) OVER w) * 180::double precision / pi() AS azimuth0,
      st_azimuth(last_value(pp.geometry) OVER w, first_value(pp.geometry) OVER w) * 180::double precision / pi() AS azimuth1
    FROM preplot_points pp
    WHERE pp.class = 'V'::bpchar
    WINDOW w AS (PARTITION BY pp.line ORDER BY pp.point ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
  )
  SELECT psl.line,
    CASE
      WHEN psl.incr THEN s.p0
      ELSE s.p1
    END AS fsp,
    CASE
      WHEN psl.incr THEN s.p1
      ELSE s.p0
    END AS lsp,
    s.num_points,
    s.length,
    CASE
      WHEN psl.incr THEN s.azimuth0
      ELSE s.azimuth1
    END AS azimuth,
    psl.incr,
    psl.remarks
  FROM summary s
  JOIN preplot_saillines psl ON psl.sailline_class = s.class AND s.line = psl.line
  ORDER BY psl.line, incr;


  ALTER TABLE preplot_lines_summary
    OWNER TO postgres;
  COMMENT ON VIEW preplot_lines_summary
    IS 'Summarises ''V'' (vessel sailline) preplot lines.';

END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
  row RECORD;
  current_db_version TEXT;
BEGIN

  SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';

  IF current_db_version >= '0.5.2' THEN
    RAISE EXCEPTION
      USING MESSAGE='Patch already applied';
  END IF;

  IF current_db_version != '0.5.1' THEN
    RAISE EXCEPTION
      USING MESSAGE='Invalid database version: ' || current_db_version,
      HINT='Ensure all previous patches have been applied.';
  END IF;

  FOR row IN
    SELECT schema_name FROM information_schema.schemata
    WHERE schema_name LIKE 'survey_%'
    ORDER BY schema_name
  LOOP
    CALL pg_temp.upgrade_survey_schema(row.schema_name);
  END LOOP;
END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();

CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.5.2"}')
ON CONFLICT (key) DO UPDATE
  SET value = public.info.value || '{"db_schema": "0.5.2"}' WHERE public.info.key = 'version';

CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
@@ -0,0 +1,132 @@
-- Fix final_lines_summary view
--
-- New schema version: 0.5.3
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
--       or rolled back.
--
-- This fixes a long-standing bug where, if the sail and source lines are
-- the same, the number of missing shots will be miscounted.
--
-- This update fixes #313.
--
-- To apply, run as the dougal user:
--
--   psql <<EOF
--   \i $THIS_FILE
--   COMMIT;
--   EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--

BEGIN;

CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
  RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN

  RAISE NOTICE 'Updating schema %', schema_name;
  -- We need to set the search path because some of the trigger
  -- functions reference other tables in survey schemas assuming
  -- they are in the search path.
  EXECUTE format('SET search_path TO %I,public', schema_name);

  CREATE OR REPLACE VIEW final_lines_summary
  AS
  WITH summary AS (
    SELECT DISTINCT fs.sequence,
      first_value(fs.point) OVER w AS fsp,
      last_value(fs.point) OVER w AS lsp,
      first_value(fs.tstamp) OVER w AS ts0,
      last_value(fs.tstamp) OVER w AS ts1,
      count(fs.point) OVER w AS num_points,
      count(pp.point) OVER w AS num_preplots,
      st_distance(first_value(fs.geometry) OVER w, last_value(fs.geometry) OVER w) AS length,
      st_azimuth(first_value(fs.geometry) OVER w, last_value(fs.geometry) OVER w) * 180::double precision / pi() AS azimuth
    FROM final_shots fs
    LEFT JOIN preplot_points pp USING (line, point)
    WINDOW w AS (PARTITION BY fs.sequence ORDER BY fs.tstamp ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
  )
  SELECT fl.sequence,
    fl.line,
    s.fsp,
    s.lsp,
    s.ts0,
    s.ts1,
    s.ts1 - s.ts0 AS duration,
    s.num_points,
    (( SELECT count(*) AS count
       FROM preplot_points
       WHERE preplot_points.line = fl.line AND ((preplot_points.point >= s.fsp AND preplot_points.point <= s.lsp) OR (preplot_points.point >= s.lsp AND preplot_points.point <= s.fsp)))) - s.num_preplots AS missing_shots,
    s.length,
    s.azimuth,
    fl.remarks,
    fl.meta
  FROM summary s
  JOIN final_lines fl USING (sequence);

  ALTER TABLE final_lines_summary
    OWNER TO postgres;

END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
  row RECORD;
  current_db_version TEXT;
BEGIN

  SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';

  IF current_db_version >= '0.5.3' THEN
    RAISE EXCEPTION
      USING MESSAGE='Patch already applied';
  END IF;

  IF current_db_version != '0.5.2' THEN
    RAISE EXCEPTION
      USING MESSAGE='Invalid database version: ' || current_db_version,
      HINT='Ensure all previous patches have been applied.';
  END IF;

  FOR row IN
    SELECT schema_name FROM information_schema.schemata
    WHERE schema_name LIKE 'survey_%'
    ORDER BY schema_name
  LOOP
    CALL pg_temp.upgrade_survey_schema(row.schema_name);
  END LOOP;
END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();

CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.5.3"}')
ON CONFLICT (key) DO UPDATE
  SET value = public.info.value || '{"db_schema": "0.5.3"}' WHERE public.info.key = 'version';

CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
@@ -0,0 +1,145 @@
-- Fix preplot_lines_summary view
--
-- New schema version: 0.5.4
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
--       or rolled back.
--
-- Fixes upgrade 35 (0.5.2). The original description of 0.5.2 is included
-- below for ease of reference:
--
-- Following the introduction of `preplot_saillines` (0.5.0), the incr and
-- ntba statuses are stored in a separate table, not in `preplot_lines`
-- (TODO: a future upgrade should remove those columns from `preplot_lines`).
--
-- Now any views referencing `incr` and `ntba` must be updated to point to
-- the new location of those attributes.
--
-- This update fixes #312.
--
-- To apply, run as the dougal user:
--
--   psql <<EOF
--   \i $THIS_FILE
--   COMMIT;
--   EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--

BEGIN;

CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
  RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN

  RAISE NOTICE 'Updating schema %', schema_name;
  -- We need to set the search path because some of the trigger
  -- functions reference other tables in survey schemas assuming
  -- they are in the search path.
  EXECUTE format('SET search_path TO %I,public', schema_name);

  CREATE OR REPLACE VIEW preplot_lines_summary
  AS
  WITH summary AS (
    SELECT DISTINCT pp.line,
      pp.class,
      first_value(pp.point) OVER w AS p0,
      last_value(pp.point) OVER w AS p1,
      count(pp.point) OVER w AS num_points,
      st_distance(first_value(pp.geometry) OVER w, last_value(pp.geometry) OVER w) AS length,
      st_azimuth(first_value(pp.geometry) OVER w, last_value(pp.geometry) OVER w) * 180::double precision / pi() AS azimuth0,
      st_azimuth(last_value(pp.geometry) OVER w, first_value(pp.geometry) OVER w) * 180::double precision / pi() AS azimuth1
    FROM preplot_points pp
    WHERE pp.class = 'V'::bpchar
    WINDOW w AS (PARTITION BY pp.line ORDER BY pp.point ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
  )
  SELECT DISTINCT psl.sailline AS line,
    CASE
      WHEN psl.incr THEN s.p0
      ELSE s.p1
    END AS fsp,
    CASE
      WHEN psl.incr THEN s.p1
      ELSE s.p0
    END AS lsp,
    s.num_points,
    s.length,
    CASE
      WHEN psl.incr THEN s.azimuth0
      ELSE s.azimuth1
    END AS azimuth,
    psl.incr,
    psl.remarks
  FROM summary s
  JOIN preplot_saillines psl ON psl.sailline_class = s.class AND s.line = psl.sailline
  ORDER BY psl.sailline, psl.incr;
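
  -- Example (illustrative only, not part of this upgrade): first and last
  -- shotpoints now follow the acquisition direction recorded in
  -- preplot_saillines:
  --
  --   SELECT line, fsp, lsp, azimuth, incr FROM preplot_lines_summary;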

  ALTER TABLE preplot_lines_summary
    OWNER TO postgres;
  COMMENT ON VIEW preplot_lines_summary
    IS 'Summarises ''V'' (vessel sailline) preplot lines.';

END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
  row RECORD;
  current_db_version TEXT;
BEGIN

  SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';

  IF current_db_version >= '0.5.4' THEN
    RAISE EXCEPTION
      USING MESSAGE='Patch already applied';
  END IF;

  IF current_db_version != '0.5.3' THEN
    RAISE EXCEPTION
      USING MESSAGE='Invalid database version: ' || current_db_version,
      HINT='Ensure all previous patches have been applied.';
  END IF;

  FOR row IN
    SELECT schema_name FROM information_schema.schemata
    WHERE schema_name LIKE 'survey_%'
    ORDER BY schema_name
  LOOP
    CALL pg_temp.upgrade_survey_schema(row.schema_name);
  END LOOP;
END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();

CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.5.4"}')
ON CONFLICT (key) DO UPDATE
  SET value = public.info.value || '{"db_schema": "0.5.4"}' WHERE public.info.key = 'version';

CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
110  etc/db/upgrades/upgrade38-v0.6.0-add-keystore-table.sql  Normal file
@@ -0,0 +1,110 @@
-- Add keystore table
--
-- New schema version: 0.6.0
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade only affects the `public` schema.
-- NOTE: Each application starts a transaction, which must be committed
--       or rolled back.
--
-- This update adds a `keystore` table, intended for storing arbitrary
-- key / value pairs, which, unlike the `info` tables, is not meant to
-- be directly accessible via the API. Its main purpose as of this writing
-- is to store user definitions (see #176, #177, #180).
--
-- To apply, run as the dougal user:
--
--   psql <<EOF
--   \i $THIS_FILE
--   COMMIT;
--   EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--

BEGIN;

CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
  RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_database () AS $outer$
BEGIN

  RAISE NOTICE 'Updating schema %', 'public';
  SET search_path TO public;

  CREATE TABLE IF NOT EXISTS keystore (
    type TEXT NOT NULL,       -- A class of data to be stored
    key TEXT NOT NULL,        -- A key that is unique for the class and access type
    last_modified TIMESTAMP   -- To detect update conflicts
      DEFAULT CURRENT_TIMESTAMP,
    data jsonb,
    PRIMARY KEY (type, key)   -- Composite primary key
  );

  -- Create a function to update the last_modified timestamp
  CREATE OR REPLACE FUNCTION update_last_modified()
  RETURNS TRIGGER AS $$
  BEGIN
    NEW.last_modified = CURRENT_TIMESTAMP;
    RETURN NEW;
  END;
  $$ LANGUAGE plpgsql;

  -- Create a trigger that calls the function before each update
  CREATE OR REPLACE TRIGGER update_keystore_last_modified
  BEFORE UPDATE ON keystore
  FOR EACH ROW
  EXECUTE FUNCTION update_last_modified();
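
  -- Example (illustrative only, not part of this upgrade; the type and
  -- key are made up): an upsert refreshes last_modified via the trigger
  -- when the row already exists:
  --
  --   INSERT INTO public.keystore (type, key, data)
  --   VALUES ('settings', 'theme', '{"dark": true}'::jsonb)
  --   ON CONFLICT (type, key) DO UPDATE SET data = EXCLUDED.data;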

END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
  row RECORD;
  current_db_version TEXT;
BEGIN

  SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';

  IF current_db_version >= '0.6.0' THEN
    RAISE EXCEPTION
      USING MESSAGE='Patch already applied';
  END IF;

  IF current_db_version != '0.5.4' THEN
    RAISE EXCEPTION
      USING MESSAGE='Invalid database version: ' || current_db_version,
      HINT='Ensure all previous patches have been applied.';
  END IF;

  CALL pg_temp.upgrade_database();
END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_database ();
DROP PROCEDURE pg_temp.upgrade ();

CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.6.0"}')
ON CONFLICT (key) DO UPDATE
  SET value = public.info.value || '{"db_schema": "0.6.0"}' WHERE public.info.key = 'version';

CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
108  etc/db/upgrades/upgrade39-v0.6.1-add-default-user.sql  Normal file
@@ -0,0 +1,108 @@
-- Add default user
--
-- New schema version: 0.6.1
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade only affects the `public` schema.
-- NOTE: Each application starts a transaction, which must be committed
--       or rolled back.
--
-- This update adds a default user to the system (see #176, #177, #180).
-- The default user can only be invoked by connecting from localhost.
--
-- This user has full access to every project via the organisations
-- permissions wildcard: `{"*": {"read": true, "write": true, "edit": true}}`
-- and can be used to bootstrap the system by creating other users
-- and assigning organisational permissions.
--
-- To apply, run as the dougal user:
--
--   psql <<EOF
--   \i $THIS_FILE
--   COMMIT;
--   EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--

BEGIN;

CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
  RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_database () AS $outer$
BEGIN

  RAISE NOTICE 'Updating schema %', 'public';
  SET search_path TO public;

  INSERT INTO keystore (type, key, data)
  VALUES ('user', '6f1e7159-4ca0-4ae4-ab4e-89078166cc10', '
    {
      "id": "6f1e7159-4ca0-4ae4-ab4e-89078166cc10",
      "ip": "127.0.0.0/24",
      "name": "☠️",
      "colour": "red",
      "active": true,
      "organisations": {
        "*": {
          "read": true,
          "write": true,
          "edit": true
        }
      }
    }
  '::jsonb)
  ON CONFLICT (type, key) DO NOTHING;
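
  -- Example (illustrative only, not part of this upgrade): confirm the
  -- default user was created:
  --
  --   SELECT key, data->>'name' AS name, data->'organisations' AS orgs
  --   FROM public.keystore WHERE type = 'user';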

END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
  row RECORD;
  current_db_version TEXT;
BEGIN

  SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';

  IF current_db_version >= '0.6.1' THEN
    RAISE EXCEPTION
      USING MESSAGE='Patch already applied';
  END IF;

  IF current_db_version != '0.6.0' THEN
    RAISE EXCEPTION
      USING MESSAGE='Invalid database version: ' || current_db_version,
      HINT='Ensure all previous patches have been applied.';
  END IF;

  CALL pg_temp.upgrade_database();
END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_database ();
DROP PROCEDURE pg_temp.upgrade ();

CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.6.1"}')
ON CONFLICT (key) DO UPDATE
  SET value = public.info.value || '{"db_schema": "0.6.1"}' WHERE public.info.key = 'version';

CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
@@ -0,0 +1,106 @@
-- Add organisations section to project configurations
--
-- New schema version: 0.6.2
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade only affects the `public` schema.
-- NOTE: Each application starts a transaction, which must be committed
--       or rolled back.
--
-- This update adds an "organisations" section to the configuration,
-- with a default configured organisation of "WGP" with full access.
-- This is so that projects can be made accessible after migrating
-- to the new permissions architecture.
--
-- In addition, projects with an id starting with "eq" are assumed to
-- be Equinor projects, and an additional organisation is added with
-- read-only access. This is intended for clients, which should be
-- assigned to the "Equinor" organisation.
--
-- Finally, we assign the vessel to the "WGP" organisation (full access)
-- so that we can actually use administrative endpoints.
--
-- To apply, run as the dougal user:
--
--   psql <<EOF
--   \i $THIS_FILE
--   COMMIT;
--   EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--

BEGIN;

CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
  RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade_database () AS $outer$
BEGIN

  RAISE NOTICE 'Updating schema %', 'public';
  SET search_path TO public;

  -- Add "organisations" section to configurations, if not already present
  UPDATE projects
  SET
    meta = jsonb_set(meta, '{organisations}', '{"WGP": {"read": true, "write": true, "edit": true}}'::jsonb, true)
  WHERE meta->'organisations' IS NULL;

  -- Add (or overwrite!) "organisations.Equinor" giving read-only access (can be changed later via API)
  UPDATE projects
  SET
    meta = jsonb_set(meta, '{organisations, Equinor}', '{"read": true, "write": false, "edit": false}'::jsonb, true)
  WHERE pid LIKE 'eq%';
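
  -- Example (illustrative only, not part of this upgrade): after both
  -- updates, an Equinor project's organisations section looks like:
  --
  --   SELECT meta->'organisations' FROM projects WHERE pid LIKE 'eq%';
  --   -- {"WGP": {"read": true, "write": true, "edit": true},
  --   --  "Equinor": {"read": true, "write": false, "edit": false}}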

END;
$outer$ LANGUAGE plpgsql;

CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
  row RECORD;
  current_db_version TEXT;
BEGIN

  SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';

  IF current_db_version >= '0.6.2' THEN
    RAISE EXCEPTION
      USING MESSAGE='Patch already applied';
  END IF;

  IF current_db_version != '0.6.1' THEN
    RAISE EXCEPTION
      USING MESSAGE='Invalid database version: ' || current_db_version,
      HINT='Ensure all previous patches have been applied.';
  END IF;

  CALL pg_temp.upgrade_database();
END;
$outer$ LANGUAGE plpgsql;

CALL pg_temp.upgrade();

CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_database ();
DROP PROCEDURE pg_temp.upgrade ();

CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.6.2"}')
ON CONFLICT (key) DO UPDATE
  SET value = public.info.value || '{"db_schema": "0.6.2"}' WHERE public.info.key = 'version';

CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);

--
--NOTE Run `COMMIT;` now if all went well
--
245  etc/default/templates/plan.html.njk  Normal file
File diff suppressed because one or more lines are too long
344  etc/default/templates/sequence.html.njk  Executable file
File diff suppressed because one or more lines are too long
@@ -7,34 +7,51 @@
|
||||
id: missing_shots
|
||||
check: |
|
||||
const sequence = currentItem;
|
||||
const sp0 = Math.min(sequence.fsp, sequence.lsp);
|
||||
const sp1 = Math.max(sequence.fsp, sequence.lsp);
|
||||
const missing = preplots.filter(r => r.line == sequence.line &&
|
||||
r.point >= sp0 && r.point <= sp1 &&
|
||||
!sequence.shots.find(s => s.point == r.point)
|
||||
);
|
||||
let results;
|
||||
if (sequence.missing_shots) {
|
||||
results = {
|
||||
shots: {}
|
||||
}
|
||||
const missing_shots = missingShotpoints.filter(i => !i.ntba);
|
||||
for (const shot of missing_shots) {
|
||||
results.shots[shot.point] = { remarks: "Missed shot", labels: [ "QC", "QCAcq" ] };
|
||||
}
|
||||
} else {
|
||||
results = true;
|
||||
}
|
||||
|
||||
missing.length == 0 || missing.map(r => `Missing shot: ${r.point}`).join("\n")
|
||||
results;
|
||||
-
|
||||
name: "Gun QC"
|
||||
disabled: false
|
||||
labels: [ "QC", "QCGuns" ]
|
||||
children:
|
||||
-
|
||||
name: "Sequences without gun data"
|
||||
iterate: "sequences"
|
||||
id: seq_no_gun_data
|
||||
check: |
|
||||
shotpoints.some(i => i.meta?.raw?.smsrc) || "Sequence has no gun data"
|
||||
-
|
||||
name: "Missing gun data"
|
||||
id: missing_gun_data
|
||||
ignoreAllFailed: true
|
||||
check: |
|
||||
!!currentItem._("raw_meta.smsrc.guns") || "Missing gun data"
|
||||
!!currentItem._("raw_meta.smsrc.guns")
|
||||
? true
|
||||
: "Missing gun data"
|
||||
|
||||
-
|
||||
name: "No fire"
|
||||
id: no_fire
|
||||
check: |
|
||||
const currentShot = currentItem;
|
||||
const gunData = currentItem._("raw_meta.smsrc");
|
||||
(gunData && gunData.num_nofire != 0)
|
||||
? `Source ${gunData.src_number}: No fire (${gunData.num_nofire} guns)`
|
||||
: true;
|
||||
// const currentShot = currentItem;
|
||||
// const gunData = currentItem._("raw_meta.smsrc");
|
||||
// (gunData && gunData.guns && gunData.guns.length != gunData.num_active)
|
||||
// ? `Source ${gunData.src_number}: No fire (${gunData.guns.length - gunData.num_active} guns)`
|
||||
// : true;
|
||||
// Disabled due to changes in Smartsource software. It now returns all guns on every shot, not just active ones.
|
||||
true
|
||||
|
||||
-
|
||||
name: "Pressure errors"
|
||||
@@ -47,8 +64,8 @@
|
||||
.guns
|
||||
.filter(gun => ((gun[2] == gunData.src_number) && (gun[pressure]/parameters.gunPressureNominal - 1) > parameters.gunPressureToleranceRatio))
|
||||
.map(gun =>
|
||||
`source ${gun[2]}, string ${gun[0]}, gun ${gun[1]}, pressure: ${gun[pressure]} / ${parameters.gunPressureNominal} = ${(Math.abs(gunData.manifold/parameters.gunPressureNominal - 1)*100).toFixed(1)}% > ${(parameters.gunPressureToleranceRatio*100).toFixed(1)}%`
|
||||
);
|
||||
`source ${gun[2]}, string ${gun[0]}, gun ${gun[1]}, pressure: ${gun[pressure]} / ${parameters.gunPressureNominal} = ${(Math.abs(gun[pressure]/parameters.gunPressureNominal - 1)*100).toFixed(2)}% > ${(parameters.gunPressureToleranceRatio*100).toFixed(2)}%`
|
||||
).join(" \n");
|
||||
results && results.length
|
||||
? results
|
||||
: true
|
||||
@@ -150,7 +167,7 @@
|
||||
.filter(gun => Math.abs(gun[firetime]-gun[aimpoint]) >= parameters.gunTimingWarning && Math.abs(gun[firetime]-gun[aimpoint]) <= parameters.gunTiming)
|
||||
.forEach(gun => {
|
||||
const value = Math.abs(gun[firetime]-gun[aimpoint]);
|
||||
result.push(`Delta error: source ${gun[2]}, string ${gun[0]}, gun ${gun[1]}: ${parameters.gunTimingWarning} ≤ ${value.toFixed(2)} ≤ ${parameters.gunTiming}`);
|
||||
result.push(`Delta warning: source ${gun[2]}, string ${gun[0]}, gun ${gun[1]}: ${parameters.gunTimingWarning} ≤ ${value.toFixed(2)} ≤ ${parameters.gunTiming}`);
|
||||
});
|
||||
}
|
||||
if (result.length) {
|
||||
@@ -192,7 +209,7 @@
|
||||
check: |
|
||||
const currentShot = currentItem;
|
||||
Math.abs(currentShot.error_i) <= parameters.crosslineError
|
||||
|| `Crossline error: ${currentShot.error_i.toFixed(1)} > ${parameters.crosslineError}`
|
||||
|| `Crossline error (${currentShot.type}): ${currentShot.error_i.toFixed(2)} > ${parameters.crosslineError}`
|
||||
|
||||
-
|
||||
name: "Inline"
|
||||
@@ -200,7 +217,7 @@
|
||||
check: |
|
||||
const currentShot = currentItem;
|
||||
Math.abs(currentShot.error_j) <= parameters.inlineError
|
||||
|| `Inline error: ${currentShot.error_j.toFixed(1)} > ${parameters.inlineError}`
|
||||
|| `Inline error (${currentShot.type}): ${currentShot.error_j.toFixed(2)} > ${parameters.inlineError}`
|
||||
|
||||
-
|
||||
name: "Centre of source preplot deviation (moving average)"
|
||||
@@ -213,11 +230,16 @@
|
||||
id: crossline_average
|
||||
check: |
|
||||
const currentSequence = currentItem;
|
||||
const i_err = currentSequence.shots.filter(s => s.error_i != null).map(a => a.error_i);
|
||||
//const i_err = shotpoints.filter(s => s.error_i != null).map(a => a.error_i);
|
||||
const i_err = shotpoints.map(i =>
|
||||
(i.errorfinal?.coordinates ?? i.errorraw?.coordinates)[0]
|
||||
)
|
||||
.filter(i => !isNaN(i));
|
||||
|
||||
if (i_err.length) {
|
||||
const avg = i_err.reduce( (a, b) => a+b)/i_err.length;
|
||||
avg <= parameters.crosslineErrorAverage ||
|
||||
`Average crossline error: ${avg.toFixed(1)} > ${parameters.crosslineErrorAverage}`
|
||||
`Average crossline error: ${avg.toFixed(2)} > ${parameters.crosslineErrorAverage}`
|
||||
} else {
|
||||
`Sequence ${currentSequence.sequence} has no shots within preplot`
|
||||
}
|
||||
@@ -230,16 +252,27 @@
|
||||
check: |
|
||||
const currentSequence = currentItem;
|
||||
const n = parameters.inlineErrorRunningAverageShots; // For brevity
|
||||
const results = currentSequence.shots.slice(n/2, -n/2).map( (shot, index) => {
|
||||
const shots = currentSequence.shots.slice(index, index+n).map(i => i.error_j).filter(i => i !== null);
|
||||
const results = shotpoints.slice(n/2, -n/2).map( (shot, index) => {
|
||||
const shots = shotpoints.slice(index, index+n).map(i =>
|
||||
(i.errorfinal?.coordinates ?? i.errorraw?.coordinates)[1]
|
||||
).filter(i => i !== null);
|
||||
if (!shots.length) {
|
||||
// We are outside the preplot
|
||||
// Nothing to see here, move along
|
||||
return true;
|
||||
}
|
||||
const mean = shots.reduce( (a, b) => a+b ) / shots.length;
|
||||
return Math.abs(mean) <= parameters.inlineErrorRunningAverageValue ||
|
||||
`Running average inline error: shot ${shot.point}, ${mean.toFixed(1)} > ${parameters.inlineErrorRunningAverageValue}`
|
||||
return Math.abs(mean) <= parameters.inlineErrorRunningAverageValue || [
|
||||
shot.point,
|
||||
{
|
||||
remarks: `Running average inline error: ${mean.toFixed(2)} > ${parameters.inlineErrorRunningAverageValue}`,
|
||||
labels: [ "QC", "QCNav" ]
|
||||
}
|
||||
]
|
||||
}).filter(i => i !== true);
|
||||
|
||||
results.length == 0 || results.join("\n");
|
||||
results.length == 0 || {
|
||||
remarks: "Sequence exceeds inline error running average limit",
|
||||
shots: Object.fromEntries(results)
|
||||
}
|
||||
|
||||
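For reference, the windowed mean this last check computes can be sketched as standalone JavaScript (the inputs are hypothetical; inside Dougal the QC harness supplies shotpoints and the parameters):

// Standalone sketch of the running-average test above; the inputs are
// made-up stand-ins for what the QC harness injects.
const inlineErrors = [0.4, 0.6, 0.5, 2.1, 2.3, 0.2]; // hypothetical error_j values
const n = 4;        // inlineErrorRunningAverageShots
const limit = 1.0;  // inlineErrorRunningAverageValue

const failures = inlineErrors.slice(n / 2, -n / 2).map((_, index) => {
  const window = inlineErrors.slice(index, index + n);
  const mean = window.reduce((a, b) => a + b) / window.length;
  return Math.abs(mean) <= limit || `window at ${index}: ${mean.toFixed(2)} > ${limit}`;
}).filter(i => i !== true);

console.log(failures.length === 0 ? "pass" : failures); // [ 'window at 1: 1.38 > 1' ]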
etc/ssl/README.md (Normal file, 3 lines)
@@ -0,0 +1,3 @@
# TLS certificates directory

Drop TLS certificates required by Dougal in this directory. It is excluded by [`.gitignore`](../../.gitignore) so its contents should never be committed by accident (and shouldn't be committed on purpose!).
@@ -1,6 +1,9 @@
{
  "jwt": {
    "secret": ""
    "secret": "",
    "options": {
      "expiresIn": 1800
    }
  },
  "db": {
    "user": "postgres",
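The new options block is shaped like the options argument of a JWT signing call. The code that consumes it is not part of this diff; a minimal sketch, assuming the common jsonwebtoken package and a hypothetical config path:

// Sketch only: jsonwebtoken is an assumption, the require path is
// hypothetical, and Dougal's actual signing code is not shown in this diff.
const jwt = require("jsonwebtoken");
const { jwt: { secret, options } } = require("./secrets.json");

// A numeric expiresIn is interpreted as seconds, so 1800 = 30 minutes
const token = jwt.sign({ user: "someone" }, secret, options);
console.log(token);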
lib/modules/@dougal/binary/classes.js (Normal file, 907 lines)
@@ -0,0 +1,907 @@
const codeToType = {
  0: Int8Array,
  1: Uint8Array,
  2: Int16Array,
  3: Uint16Array,
  4: Int32Array,
  5: Uint32Array,
  7: Float32Array,
  8: Float64Array,
  9: BigInt64Array,
  10: BigUint64Array
};

const typeToBytes = {
  Int8Array: 1,
  Uint8Array: 1,
  Int16Array: 2,
  Uint16Array: 2,
  Int32Array: 4,
  Uint32Array: 4,
  Float32Array: 4,
  Float64Array: 8,
  BigInt64Array: 8,
  BigUint64Array: 8
};

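// Note (an added summary, not part of the commit): the layout parsed below is
//
//   Bundle header (4 bytes): a Uint32 LE whose low byte is the 0x1C marker
//   and whose upper 24 bits give the payload length in bytes.
//
//   Chunk header (12 bytes):
//     [0]     chunk type: 0x11 sequential, 0x12 interleaved
//     [1]     udv (user-defined value)
//     [2..3]  record count (Uint16 LE)
//     [4..5]  i (Uint16 LE)
//     [6..7]  j0 (Uint16 LE)
//     [8..9]  Δj (Int16 LE)
//     [10]    Δelem_count
//     [11]    elem_count
//
//   followed by one type byte per Δelem (incr code in the high nibble, base
//   code in the low nibble), one type byte per elem, the initial Δelem
//   values, padding to a 4-byte boundary, the record data, and padding again.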
function readTypedValue(view, offset, type) {
  switch (type) {
    case Int8Array: return view.getInt8(offset);
    case Uint8Array: return view.getUint8(offset);
    case Int16Array: return view.getInt16(offset, true);
    case Uint16Array: return view.getUint16(offset, true);
    case Int32Array: return view.getInt32(offset, true);
    case Uint32Array: return view.getUint32(offset, true);
    case Float32Array: return view.getFloat32(offset, true);
    case Float64Array: return view.getFloat64(offset, true);
    case BigInt64Array: return view.getBigInt64(offset, true);
    case BigUint64Array: return view.getBigUint64(offset, true);
    default: throw new Error(`Unsupported type: ${type.name}`);
  }
}

function writeTypedValue(view, offset, value, type) {
  switch (type) {
    case Int8Array: view.setInt8(offset, value); break;
    case Uint8Array: view.setUint8(offset, value); break;
    case Int16Array: view.setInt16(offset, value, true); break;
    case Uint16Array: view.setUint16(offset, value, true); break;
    case Int32Array: view.setInt32(offset, value, true); break;
    case Uint32Array: view.setUint32(offset, value, true); break;
    case Float32Array: view.setFloat32(offset, value, true); break;
    case Float64Array: view.setFloat64(offset, value, true); break;
    case BigInt64Array: view.setBigInt64(offset, BigInt(value), true); break;
    case BigUint64Array: view.setBigUint64(offset, BigInt(value), true); break;
    default: throw new Error(`Unsupported type: ${type.name}`);
  }
}

class DougalBinaryBundle extends ArrayBuffer {

  static HEADER_LENGTH = 4; // Length of a bundle header

  /** Clone an existing ByteArray into a DougalBinaryBundle
   */
  static clone (buffer) {
    const clone = new DougalBinaryBundle(buffer.byteLength);
    const uint8Array = new Uint8Array(buffer);
    const uint8ArrayClone = new Uint8Array(clone);
    uint8ArrayClone.set(uint8Array);
    return clone;
  }

  constructor (length, options) {
    super (length, options);
  }

  /** Get the count of bundles in this ByteArray.
   *
   * Stops at the first non-bundle looking offset
   */
  get bundleCount () {
    let count = 0;
    let currentBundleOffset = 0;
    const view = new DataView(this);

    while (currentBundleOffset < this.byteLength) {
      const currentBundleHeader = view.getUint32(currentBundleOffset, true);
      if ((currentBundleHeader & 0xff) !== 0x1c) {
        // This is not a bundle
        return count;
      }
      let currentBundleLength = currentBundleHeader >>> 8;

      currentBundleOffset += currentBundleLength + DougalBinaryBundle.HEADER_LENGTH;
      count++;
    }

    return count;
  }


  /** Get the number of chunks in the bundles of this ByteArray
   */
  get chunkCount () {
    let count = 0;
    let bundleOffset = 0;
    const view = new DataView(this);

    while (bundleOffset < this.byteLength) {
      const header = view.getUint32(bundleOffset, true);
      if ((header & 0xFF) !== 0x1C) break;
      const length = header >>> 8;
      if (bundleOffset + 4 + length > this.byteLength) break;

      let chunkOffset = bundleOffset + 4; // relative to buffer start

      while (chunkOffset < bundleOffset + 4 + length) {
        const chunkType = view.getUint8(chunkOffset);
        if (chunkType !== 0x11 && chunkType !== 0x12) break;

        const cCount = view.getUint16(chunkOffset + 2, true);
        const ΔelemC = view.getUint8(chunkOffset + 10);
        const elemC = view.getUint8(chunkOffset + 11);

        let localOffset = 12; // header size

        localOffset += ΔelemC + elemC; // preface

        // initial values
        for (let k = 0; k < ΔelemC; k++) {
          const typeByte = view.getUint8(chunkOffset + 12 + k);
          const baseCode = typeByte & 0xF;
          const baseType = codeToType[baseCode];
          if (!baseType) throw new Error('Invalid base type code');
          localOffset += typeToBytes[baseType.name];
        }

        // pad after initial
        while (localOffset % 4 !== 0) localOffset++;

        if (chunkType === 0x11) { // Sequential
          // record data: Δelems incrs
          for (let k = 0; k < ΔelemC; k++) {
            const typeByte = view.getUint8(chunkOffset + 12 + k);
            const incrCode = typeByte >> 4;
            const incrType = codeToType[incrCode];
            if (!incrType) throw new Error('Invalid incr type code');
            localOffset += cCount * typeToBytes[incrType.name];
          }

          // elems
          for (let k = 0; k < elemC; k++) {
            const typeCode = view.getUint8(chunkOffset + 12 + ΔelemC + k);
            const type = codeToType[typeCode];
            if (!type) throw new Error('Invalid elem type code');
            localOffset += cCount * typeToBytes[type.name];
          }
        } else { // Interleaved
          // Compute exact stride for interleaved record data
          let ΔelemStride = 0;
          for (let k = 0; k < ΔelemC; k++) {
            const typeByte = view.getUint8(chunkOffset + 12 + k);
            const incrCode = typeByte >> 4;
            const incrType = codeToType[incrCode];
            if (!incrType) throw new Error('Invalid incr type code');
            ΔelemStride += typeToBytes[incrType.name];
          }
          let elemStride = 0;
          for (let k = 0; k < elemC; k++) {
            const typeCode = view.getUint8(chunkOffset + 12 + ΔelemC + k);
            const type = codeToType[typeCode];
            if (!type) throw new Error('Invalid elem type code');
            elemStride += typeToBytes[type.name];
          }
          const recordStride = ΔelemStride + elemStride;
          localOffset += cCount * recordStride;
        }

        // pad after record
        while (localOffset % 4 !== 0) localOffset++;

        chunkOffset += localOffset;
        count++;
      }

      bundleOffset += 4 + length;
    }

    return count;
  }

  /** Return an array of DougalBinaryChunkSequential or DougalBinaryChunkInterleaved instances
   */
  chunks () {
    const chunks = [];
    let bundleOffset = 0;
    const view = new DataView(this);

    while (bundleOffset < this.byteLength) {
      const header = view.getUint32(bundleOffset, true);
      if ((header & 0xFF) !== 0x1C) break;
      const length = header >>> 8;
      if (bundleOffset + 4 + length > this.byteLength) break;

      let chunkOffset = bundleOffset + 4;

      while (chunkOffset < bundleOffset + 4 + length) {
        const chunkType = view.getUint8(chunkOffset);
        if (chunkType !== 0x11 && chunkType !== 0x12) break;

        const cCount = view.getUint16(chunkOffset + 2, true);
        const ΔelemC = view.getUint8(chunkOffset + 10);
        const elemC = view.getUint8(chunkOffset + 11);

        let localOffset = 12;

        localOffset += ΔelemC + elemC;

        // initial values
        for (let k = 0; k < ΔelemC; k++) {
          const typeByte = view.getUint8(chunkOffset + 12 + k);
          const baseCode = typeByte & 0xF;
          const baseType = codeToType[baseCode];
          if (!baseType) throw new Error('Invalid base type code');
          localOffset += typeToBytes[baseType.name];
        }

        // pad after initial
        while (localOffset % 4 !== 0) localOffset++;

        if (chunkType === 0x11) { // Sequential
          // record data: Δelems incrs
          for (let k = 0; k < ΔelemC; k++) {
            const typeByte = view.getUint8(chunkOffset + 12 + k);
            const incrCode = typeByte >> 4;
            const incrType = codeToType[incrCode];
            if (!incrType) throw new Error('Invalid incr type code');
            localOffset += cCount * typeToBytes[incrType.name];
          }

          // elems
          for (let k = 0; k < elemC; k++) {
            const typeCode = view.getUint8(chunkOffset + 12 + ΔelemC + k);
            const type = codeToType[typeCode];
            if (!type) throw new Error('Invalid elem type code');
            localOffset += cCount * typeToBytes[type.name];
          }
        } else { // Interleaved
          // Compute exact stride for interleaved record data
          let ΔelemStride = 0;
          for (let k = 0; k < ΔelemC; k++) {
            const typeByte = view.getUint8(chunkOffset + 12 + k);
            const incrCode = typeByte >> 4;
            const incrType = codeToType[incrCode];
            if (!incrType) throw new Error('Invalid incr type code');
            ΔelemStride += typeToBytes[incrType.name];
          }
          let elemStride = 0;
          for (let k = 0; k < elemC; k++) {
            const typeCode = view.getUint8(chunkOffset + 12 + ΔelemC + k);
            const type = codeToType[typeCode];
            if (!type) throw new Error('Invalid elem type code');
            elemStride += typeToBytes[type.name];
          }
          const recordStride = ΔelemStride + elemStride;
          localOffset += cCount * recordStride;
        }

        // pad after record
        while (localOffset % 4 !== 0) localOffset++;

        switch (chunkType) {
          case 0x11:
            chunks.push(new DougalBinaryChunkSequential(this, chunkOffset, localOffset));
            break;
          case 0x12:
            chunks.push(new DougalBinaryChunkInterleaved(this, chunkOffset, localOffset));
            break;
          default:
            throw new Error('Invalid chunk type');
        }

        chunkOffset += localOffset;
      }

      bundleOffset += 4 + length;
    }

    return chunks;
  }

  /** Return a ByteArray containing all data from all
   * chunks including reconstructed i, j and incremental
   * values as follows:
   *
   * <i_0> <i_1> … <i_x>                    // i values (constant)
   * <j_0> <j_1> … <j_x>                    // j values (j0 + Δj*i)
   * <Δelem_0_0> <Δelem_0_1> … <Δelem_0_x>  // reconstructed Δelem0 (uses baseType)
   * <Δelem_1_0> <Δelem_1_1> … <Δelem_1_x>  // reconstructed Δelem1
   * …
   * <Δelem_y_0> <Δelem_y_1> … <Δelem_y_x>  // last reconstructed Δelem
   * <elem_0_0> <elem_0_1> … <elem_0_x>     // First elem
   * <elem_1_0> <elem_1_1> … <elem_1_x>     // Second elem
   * …
   * <elem_z_0> <elem_z_1> … <elem_z_x>     // Last elem
   *
   * It does not matter whether the underlying chunks are
   * sequential or interleaved. This function will transform
   * as necessary.
   *
   */
  getDataSequentially () {
    const chunks = this.chunks();
    if (chunks.length === 0) return new ArrayBuffer(0);

    const firstChunk = chunks[0];
    const ΔelemC = firstChunk.ΔelemCount;
    const elemC = firstChunk.elemCount;

    // Check consistency across chunks
    for (const chunk of chunks) {
      if (chunk.ΔelemCount !== ΔelemC || chunk.elemCount !== elemC) {
        throw new Error('Inconsistent chunk structures');
      }
    }

    // Get types from first chunk
    const view = new DataView(firstChunk);
    const ΔelemBaseTypes = [];
    for (let k = 0; k < ΔelemC; k++) {
      const typeByte = view.getUint8(12 + k);
      const baseCode = typeByte & 0xF;
      const baseType = codeToType[baseCode];
      if (!baseType) throw new Error('Invalid base type code');
      ΔelemBaseTypes.push(baseType);
    }
    const elemTypes = [];
    for (let k = 0; k < elemC; k++) {
      const typeCode = view.getUint8(12 + ΔelemC + k);
      const type = codeToType[typeCode];
      if (!type) throw new Error('Invalid elem type code');
      elemTypes.push(type);
    }

    // Compute total records
    const totalN = chunks.reduce((sum, c) => sum + c.jCount, 0);

    // Compute sizes
    const size_i = totalN * 2; // Uint16 for i
    const size_j = totalN * 4; // Int32 for j
    let size_Δelems = 0;
    for (const t of ΔelemBaseTypes) {
      size_Δelems += totalN * typeToBytes[t.name];
    }
    let size_elems = 0;
    for (const t of elemTypes) {
      size_elems += totalN * typeToBytes[t.name];
    }
    const totalSize = size_i + size_j + size_Δelems + size_elems;

    const ab = new ArrayBuffer(totalSize);
    const dv = new DataView(ab);

    // Write i's
    let off = 0;
    for (const chunk of chunks) {
      const i = chunk.i;
      for (let idx = 0; idx < chunk.jCount; idx++) {
        dv.setUint16(off, i, true);
        off += 2;
      }
    }

    // Write j's
    off = size_i;
    for (const chunk of chunks) {
      const j0 = chunk.j0;
      const Δj = chunk.Δj;
      for (let idx = 0; idx < chunk.jCount; idx++) {
        const j = j0 + idx * Δj;
        dv.setInt32(off, j, true);
        off += 4;
      }
    }

    // Write Δelems
    off = size_i + size_j;
    for (let m = 0; m < ΔelemC; m++) {
      const type = ΔelemBaseTypes[m];
      const bytes = typeToBytes[type.name];
      for (const chunk of chunks) {
        const arr = chunk.Δelem(m);
        for (let idx = 0; idx < chunk.jCount; idx++) {
          writeTypedValue(dv, off, arr[idx], type);
          off += bytes;
        }
      }
    }

    // Write elems
    for (let m = 0; m < elemC; m++) {
      const type = elemTypes[m];
      const bytes = typeToBytes[type.name];
      for (const chunk of chunks) {
        const arr = chunk.elem(m);
        for (let idx = 0; idx < chunk.jCount; idx++) {
          writeTypedValue(dv, off, arr[idx], type);
          off += bytes;
        }
      }
    }

    return ab;
  }

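  // Usage sketch (not part of the commit): reading the planar buffer
  // produced by getDataSequentially() back out, e.g. with a DataView,
  // given totalN records:
  //
  //   const data = bundle.getDataSequentially();
  //   const dv = new DataView(data);
  //   const firstI = dv.getUint16(0, true);          // i block starts at 0
  //   const firstJ = dv.getInt32(totalN * 2, true);  // j block follows the i block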
  /** Return a ByteArray containing all data from all
   * chunks including reconstructed i, j and incremental
   * values, interleaved as follows:
   *
   * <i_0> <j_0> <Δelem_0_0> <Δelem_1_0> … <Δelem_y_0> <elem_0_0> <elem_1_0> … <elem_z_0>
   * <i_1> <j_1> <Δelem_0_1> <Δelem_1_1> … <Δelem_y_1> <elem_0_1> <elem_1_1> … <elem_z_1>
   * …
   * <i_x> <j_x> <Δelem_0_x> <Δelem_1_x> … <Δelem_y_x> <elem_0_x> <elem_1_x> … <elem_z_x>
   *
   * It does not matter whether the underlying chunks are
   * sequential or interleaved. This function will transform
   * as necessary.
   *
   */
  getDataInterleaved () {
    const chunks = this.chunks();
    if (chunks.length === 0) return new ArrayBuffer(0);

    const firstChunk = chunks[0];
    const ΔelemC = firstChunk.ΔelemCount;
    const elemC = firstChunk.elemCount;

    // Check consistency across chunks
    for (const chunk of chunks) {
      if (chunk.ΔelemCount !== ΔelemC || chunk.elemCount !== elemC) {
        throw new Error('Inconsistent chunk structures');
      }
    }

    // Get types from first chunk
    const view = new DataView(firstChunk);
    const ΔelemBaseTypes = [];
    for (let k = 0; k < ΔelemC; k++) {
      const typeByte = view.getUint8(12 + k);
      const baseCode = typeByte & 0xF;
      const baseType = codeToType[baseCode];
      if (!baseType) throw new Error('Invalid base type code');
      ΔelemBaseTypes.push(baseType);
    }
    const elemTypes = [];
    for (let k = 0; k < elemC; k++) {
      const typeCode = view.getUint8(12 + ΔelemC + k);
      const type = codeToType[typeCode];
      if (!type) throw new Error('Invalid elem type code');
      elemTypes.push(type);
    }

    // Compute total records
    const totalN = chunks.reduce((sum, c) => sum + c.jCount, 0);

    // Compute record size
    const recordSize = 2 + 4 + // i (Uint16) + j (Int32)
      ΔelemBaseTypes.reduce((sum, t) => sum + typeToBytes[t.name], 0) +
      elemTypes.reduce((sum, t) => sum + typeToBytes[t.name], 0);
    const totalSize = totalN * recordSize;

    const ab = new ArrayBuffer(totalSize);
    const dv = new DataView(ab);

    let off = 0;
    for (const chunk of chunks) {
      const i = chunk.i;
      const j0 = chunk.j0;
      const Δj = chunk.Δj;
      for (let idx = 0; idx < chunk.jCount; idx++) {
        dv.setUint16(off, i, true);
        off += 2;
        const j = j0 + idx * Δj;
        dv.setInt32(off, j, true);
        off += 4;
        for (let m = 0; m < ΔelemC; m++) {
          const type = ΔelemBaseTypes[m];
          const bytes = typeToBytes[type.name];
          const arr = chunk.Δelem(m);
          writeTypedValue(dv, off, arr[idx], type);
          off += bytes;
        }
        for (let m = 0; m < elemC; m++) {
          const type = elemTypes[m];
          const bytes = typeToBytes[type.name];
          const arr = chunk.elem(m);
          writeTypedValue(dv, off, arr[idx], type);
          off += bytes;
        }
      }
    }

    return ab;
  }

}


class DougalBinaryChunkSequential extends ArrayBuffer {

  constructor (buffer, offset, length) {
    super(length);
    new Uint8Array(this).set(new Uint8Array(buffer, offset, length));
    this._ΔelemCaches = new Array(this.ΔelemCount);
    this._elemCaches = new Array(this.elemCount);
    this._ΔelemBlockOffsets = null;
    this._elemBlockOffsets = null;
    this._recordOffset = null;
  }

  _getRecordOffset() {
    if (this._recordOffset !== null) return this._recordOffset;
    const view = new DataView(this);
    const ΔelemC = this.ΔelemCount;
    const elemC = this.elemCount;

    let recordOffset = 12 + ΔelemC + elemC;
    for (let k = 0; k < ΔelemC; k++) {
      const tb = view.getUint8(12 + k);
      const bc = tb & 0xF;
      const bt = codeToType[bc];
      recordOffset += typeToBytes[bt.name];
    }
    while (recordOffset % 4 !== 0) recordOffset++;
    this._recordOffset = recordOffset;
    return recordOffset;
  }

  _initBlockOffsets() {
    if (this._ΔelemBlockOffsets !== null) return;
    const view = new DataView(this);
    const count = this.jCount;
    const ΔelemC = this.ΔelemCount;
    const elemC = this.elemCount;

    const recordOffset = this._getRecordOffset();

    this._ΔelemBlockOffsets = [];
    let o = recordOffset;
    for (let k = 0; k < ΔelemC; k++) {
      this._ΔelemBlockOffsets[k] = o;
      const tb = view.getUint8(12 + k);
      const ic = tb >> 4;
      const it = codeToType[ic];
      o += count * typeToBytes[it.name];
    }

    this._elemBlockOffsets = [];
    for (let k = 0; k < elemC; k++) {
      this._elemBlockOffsets[k] = o;
      const tc = view.getUint8(12 + ΔelemC + k);
      const t = codeToType[tc];
      o += count * typeToBytes[t.name];
    }
  }

  /** Return the user-defined value
   */
  get udv () {
    return new DataView(this).getUint8(1);
  }

  /** Return the number of j elements in this chunk
   */
  get jCount () {
    return new DataView(this).getUint16(2, true);
  }

  /** Return the i value in this chunk
   */
  get i () {
    return new DataView(this).getUint16(4, true);
  }

  /** Return the j0 value in this chunk
   */
  get j0 () {
    return new DataView(this).getUint16(6, true);
  }

  /** Return the Δj value in this chunk
   */
  get Δj () {
    return new DataView(this).getInt16(8, true);
  }

  /** Return the Δelem_count value in this chunk
   */
  get ΔelemCount () {
    return new DataView(this).getUint8(10);
  }

  /** Return the elem_count value in this chunk
   */
  get elemCount () {
    return new DataView(this).getUint8(11);
  }

  /** Return a TypedArray (e.g., Uint16Array, …) for the n-th Δelem in the chunk
   */
  Δelem (n) {
    if (this._ΔelemCaches[n]) return this._ΔelemCaches[n];

    if (n < 0 || n >= this.ΔelemCount) throw new Error(`Invalid Δelem index: ${n}`);
    const view = new DataView(this);
    const count = this.jCount;
    const ΔelemC = this.ΔelemCount;

    const typeByte = view.getUint8(12 + n);
    const baseCode = typeByte & 0xF;
    const incrCode = typeByte >> 4;
    const baseType = codeToType[baseCode];
    const incrType = codeToType[incrCode];
    if (!baseType || !incrType) throw new Error('Invalid type codes for Δelem');

    // Find offset for initial value of this Δelem
    let initialOffset = 12 + ΔelemC + this.elemCount;
    for (let k = 0; k < n; k++) {
      const tb = view.getUint8(12 + k);
      const bc = tb & 0xF;
      const bt = codeToType[bc];
      initialOffset += typeToBytes[bt.name];
    }

    let current = readTypedValue(view, initialOffset, baseType);

    // Advance to start of record data (after all initials and pad)
    const recordOffset = this._getRecordOffset();

    // Find offset for deltas of this Δelem (skip previous Δelems' delta blocks)
    this._initBlockOffsets();
    const deltaOffset = this._ΔelemBlockOffsets[n];

    // Reconstruct the array
    const arr = new baseType(count);
    const isBigInt = baseType === BigInt64Array || baseType === BigUint64Array;
    arr[0] = current;
    for (let idx = 1; idx < count; idx++) {
      let delta = readTypedValue(view, deltaOffset + idx * typeToBytes[incrType.name], incrType);
      if (isBigInt) {
        delta = BigInt(delta);
        current += delta;
      } else {
        current += delta;
      }
      arr[idx] = current;
    }

    this._ΔelemCaches[n] = arr;
    return arr;
  }

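  // Worked example (not part of the commit) of the delta decoding in
  // Δelem() above: with an initial value of 100 and a stored increment
  // block [0, 2, 3, -1], the reconstructed array is [100, 102, 105, 104];
  // the increment at index 0 is a placeholder and is never read.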
  /** Return a TypedArray (e.g., Uint16Array, …) for the n-th elem in the chunk
   */
  elem (n) {
    if (this._elemCaches[n]) return this._elemCaches[n];

    if (n < 0 || n >= this.elemCount) throw new Error(`Invalid elem index: ${n}`);
    const view = new DataView(this);
    const count = this.jCount;
    const ΔelemC = this.ΔelemCount;
    const elemC = this.elemCount;

    const typeCode = view.getUint8(12 + ΔelemC + n);
    const type = codeToType[typeCode];
    if (!type) throw new Error('Invalid type code for elem');

    // Find offset for this elem's data block
    this._initBlockOffsets();
    const elemOffset = this._elemBlockOffsets[n];

    // Create and populate the array
    const arr = new type(count);
    const bytes = typeToBytes[type.name];
    for (let idx = 0; idx < count; idx++) {
      arr[idx] = readTypedValue(view, elemOffset + idx * bytes, type);
    }

    this._elemCaches[n] = arr;
    return arr;
  }

  getRecord (index) {
    if (index < 0 || index >= this.jCount) throw new Error(`Invalid record index: ${index}`);

    const arr = [this.udv, this.i, this.j0 + index * this.Δj];

    for (let m = 0; m < this.ΔelemCount; m++) {
      const values = this.Δelem(m);
      arr.push(values[index]);
    }

    for (let m = 0; m < this.elemCount; m++) {
      const values = this.elem(m);
      arr.push(values[index]);
    }

    return arr;
  }
}


class DougalBinaryChunkInterleaved extends ArrayBuffer {

  constructor(buffer, offset, length) {
    super(length);
    new Uint8Array(this).set(new Uint8Array(buffer, offset, length));
    this._incrStrides = [];
    this._elemStrides = [];
    this._incrOffsets = [];
    this._elemOffsets = [];
    this._recordStride = 0;
    this._recordOffset = null;
    this._initStrides();
    this._ΔelemCaches = new Array(this.ΔelemCount);
    this._elemCaches = new Array(this.elemCount);
  }

  _getRecordOffset() {
    if (this._recordOffset !== null) return this._recordOffset;
    const view = new DataView(this);
    const ΔelemC = this.ΔelemCount;
    const elemC = this.elemCount;

    let recordOffset = 12 + ΔelemC + elemC;
    for (let k = 0; k < ΔelemC; k++) {
      const tb = view.getUint8(12 + k);
      const bc = tb & 0xF;
      const bt = codeToType[bc];
      recordOffset += typeToBytes[bt.name];
    }
    while (recordOffset % 4 !== 0) recordOffset++;
    this._recordOffset = recordOffset;
    return recordOffset;
  }

  _initStrides() {
    const view = new DataView(this);
    const ΔelemC = this.ΔelemCount;
    const elemC = this.elemCount;

    // Compute incr strides and offsets
    let incrOffset = 0;
    for (let k = 0; k < ΔelemC; k++) {
      const typeByte = view.getUint8(12 + k);
      const incrCode = typeByte >> 4;
      const incrType = codeToType[incrCode];
      if (!incrType) throw new Error('Invalid incr type code');
      this._incrOffsets.push(incrOffset);
      const bytes = typeToBytes[incrType.name];
      this._incrStrides.push(bytes);
      incrOffset += bytes;
      this._recordStride += bytes;
    }

    // Compute elem strides and offsets
    let elemOffset = incrOffset;
    for (let k = 0; k < elemC; k++) {
      const typeCode = view.getUint8(12 + ΔelemC + k);
      const type = codeToType[typeCode];
      if (!type) throw new Error('Invalid elem type code');
      this._elemOffsets.push(elemOffset);
      const bytes = typeToBytes[type.name];
      this._elemStrides.push(bytes);
      elemOffset += bytes;
      this._recordStride += bytes;
    }
  }

  get udv() {
    return new DataView(this).getUint8(1);
  }

  get jCount() {
    return new DataView(this).getUint16(2, true);
  }

  get i() {
    return new DataView(this).getUint16(4, true);
  }

  get j0() {
    return new DataView(this).getUint16(6, true);
  }

  get Δj() {
    return new DataView(this).getInt16(8, true);
  }

  get ΔelemCount() {
    return new DataView(this).getUint8(10);
  }

  get elemCount() {
    return new DataView(this).getUint8(11);
  }

  Δelem(n) {
    if (this._ΔelemCaches[n]) return this._ΔelemCaches[n];

    if (n < 0 || n >= this.ΔelemCount) throw new Error(`Invalid Δelem index: ${n}`);
    const view = new DataView(this);
    const count = this.jCount;
    const ΔelemC = this.ΔelemCount;

    const typeByte = view.getUint8(12 + n);
    const baseCode = typeByte & 0xF;
    const incrCode = typeByte >> 4;
    const baseType = codeToType[baseCode];
    const incrType = codeToType[incrCode];
    if (!baseType || !incrType) throw new Error('Invalid type codes for Δelem');

    // Find offset for initial value of this Δelem
    let initialOffset = 12 + ΔelemC + this.elemCount;
    for (let k = 0; k < n; k++) {
      const tb = view.getUint8(12 + k);
      const bc = tb & 0xF;
      const bt = codeToType[bc];
      initialOffset += typeToBytes[bt.name];
    }

    let current = readTypedValue(view, initialOffset, baseType);

    // Find offset to start of record data
    const recordOffset = this._getRecordOffset();

    // Use precomputed offset for this Δelem
    const deltaOffset = recordOffset + this._incrOffsets[n];

    // Reconstruct the array
    const arr = new baseType(count);
    const isBigInt = baseType === BigInt64Array || baseType === BigUint64Array;
    arr[0] = current;
    for (let idx = 1; idx < count; idx++) {
      let delta = readTypedValue(view, deltaOffset + idx * this._recordStride, incrType);
      if (isBigInt) {
        delta = BigInt(delta);
        current += delta;
      } else {
        current += delta;
      }
      arr[idx] = current;
    }

    this._ΔelemCaches[n] = arr;
    return arr;
  }

  elem(n) {
    if (this._elemCaches[n]) return this._elemCaches[n];

    if (n < 0 || n >= this.elemCount) throw new Error(`Invalid elem index: ${n}`);
    const view = new DataView(this);
    const count = this.jCount;
    const ΔelemC = this.ΔelemCount;

    const typeCode = view.getUint8(12 + ΔelemC + n);
    const type = codeToType[typeCode];
    if (!type) throw new Error('Invalid type code for elem');

    // Find offset to start of record data
    const recordOffset = this._getRecordOffset();

    // Use precomputed offset for this elem (relative to start of record data)
    const elemOffset = recordOffset + this._elemOffsets[n];

    // Create and populate the array
    const arr = new type(count);
    const bytes = typeToBytes[type.name];
    for (let idx = 0; idx < count; idx++) {
      arr[idx] = readTypedValue(view, elemOffset + idx * this._recordStride, type);
    }

    this._elemCaches[n] = arr;
    return arr;
  }

  getRecord (index) {
    if (index < 0 || index >= this.jCount) throw new Error(`Invalid record index: ${index}`);

    const arr = [this.udv, this.i, this.j0 + index * this.Δj];

    for (let m = 0; m < this.ΔelemCount; m++) {
      const values = this.Δelem(m);
      arr.push(values[index]);
    }

    for (let m = 0; m < this.elemCount; m++) {
      const values = this.elem(m);
      arr.push(values[index]);
    }

    return arr;
  }
}


module.exports = { DougalBinaryBundle, DougalBinaryChunkSequential, DougalBinaryChunkInterleaved }
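A minimal usage sketch for these classes (not part of the commit; raw stands in for whatever bytes the caller has read):

// "raw" is a hypothetical ArrayBuffer holding one or more bundles
const { DougalBinaryBundle } = require("./classes");

const bundle = DougalBinaryBundle.clone(raw);
console.log(bundle.bundleCount, bundle.chunkCount);
for (const chunk of bundle.chunks()) {
  console.log(chunk.i, chunk.j0, chunk.getRecord(0));
}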
lib/modules/@dougal/binary/decode.js (Normal file, 327 lines)
@@ -0,0 +1,327 @@
const codeToType = {
  0: Int8Array,
  1: Uint8Array,
  2: Int16Array,
  3: Uint16Array,
  4: Int32Array,
  5: Uint32Array,
  7: Float32Array,
  8: Float64Array,
  9: BigInt64Array,
  10: BigUint64Array
};

const typeToBytes = {
  Int8Array: 1,
  Uint8Array: 1,
  Int16Array: 2,
  Uint16Array: 2,
  Int32Array: 4,
  Uint32Array: 4,
  Float32Array: 4,
  Float64Array: 8,
  BigInt64Array: 8,
  BigUint64Array: 8
};

function sequential(binary) {
  if (!(binary instanceof Uint8Array) || binary.length < 4) {
    throw new Error('Invalid binary input');
  }

  const view = new DataView(binary.buffer, binary.byteOffset, binary.byteLength);
  let offset = 0;

  // Initialize result (assuming single i value for simplicity; extend for multiple i values if needed)
  const result = { i: null, j: [], Δelems: [], elems: [] };

  // Process bundles
  while (offset < binary.length) {
    // Read bundle header
    if (offset + 4 > binary.length) throw new Error('Incomplete bundle header');

    const bundleHeader = view.getUint32(offset, true);
    if ((bundleHeader & 0xFF) !== 0x1C) throw new Error('Invalid bundle marker');
    const bundleLength = bundleHeader >>> 8;
    offset += 4;
    const bundleEnd = offset + bundleLength;

    if (bundleEnd > binary.length) throw new Error('Bundle length exceeds input size');

    // Process chunks in bundle
    while (offset < bundleEnd) {
      // Read chunk header
      if (offset + 12 > bundleEnd) throw new Error('Incomplete chunk header');
      const chunkType = view.getUint8(offset);
      if (chunkType !== 0x11) throw new Error(`Unsupported chunk type: ${chunkType}`);
      offset += 1; // Skip chunk type
      offset += 1; // Skip udv
      const count = view.getUint16(offset, true); offset += 2;
      if (count > 65535) throw new Error('Chunk count exceeds 65535');
      const iValue = view.getUint16(offset, true); offset += 2;
      const j0 = view.getUint16(offset, true); offset += 2;
      const Δj = view.getInt16(offset, true); offset += 2;
      const ΔelemCount = view.getUint8(offset++); // Δelem_count
      const elemCount = view.getUint8(offset++); // elem_count

      // Set i value (assuming all chunks share the same i)
      if (result.i === null) result.i = iValue;
      else if (result.i !== iValue) throw new Error('Multiple i values not supported');

      // Read preface (element types)
      const ΔelemTypes = [];
      for (let i = 0; i < ΔelemCount; i++) {
        if (offset >= bundleEnd) throw new Error('Incomplete Δelem types');
        const typeByte = view.getUint8(offset++);
        const baseCode = typeByte & 0x0F;
        const incrCode = typeByte >> 4;
        if (!codeToType[baseCode] || !codeToType[incrCode]) {
          throw new Error(`Invalid type code in Δelem: ${typeByte}`);
        }
        ΔelemTypes.push({ baseType: codeToType[baseCode], incrType: codeToType[incrCode] });
      }
      const elemTypes = [];
      for (let i = 0; i < elemCount; i++) {
        if (offset >= bundleEnd) throw new Error('Incomplete elem types');
        const typeCode = view.getUint8(offset++);
        if (!codeToType[typeCode]) throw new Error(`Invalid type code in elem: ${typeCode}`);
        elemTypes.push(codeToType[typeCode]);
      }

      // Initialize Δelems and elems arrays if first chunk
      if (!result.Δelems.length && ΔelemCount > 0) {
        result.Δelems = Array(ΔelemCount).fill().map(() => []);
      }
      if (!result.elems.length && elemCount > 0) {
        result.elems = Array(elemCount).fill().map(() => []);
      }

      // Read initial values for Δelems
      const initialValues = [];
      for (const { baseType } of ΔelemTypes) {
        if (offset + typeToBytes[baseType.name] > bundleEnd) {
          throw new Error('Incomplete initial values');
        }
        initialValues.push(readTypedValue(view, offset, baseType));
        offset += typeToBytes[baseType.name];
      }
      // Skip padding
      while (offset % 4 !== 0) {
        if (offset >= bundleEnd) throw new Error('Incomplete padding after initial values');
        offset++;
      }

      // Reconstruct j values
      for (let idx = 0; idx < count; idx++) {
        result.j.push(j0 + idx * Δj);
      }

      // Read record data (non-interleaved)
      for (let i = 0; i < ΔelemCount; i++) {
        let current = initialValues[i];
        const values = result.Δelems[i];
        const incrType = ΔelemTypes[i].incrType;
        const isBigInt = typeof current === 'bigint';
        for (let idx = 0; idx < count; idx++) {
          if (offset + typeToBytes[incrType.name] > bundleEnd) {
            throw new Error('Incomplete Δelem data');
          }
          let delta = readTypedValue(view, offset, incrType);
          if (idx === 0) {
            values.push(isBigInt ? Number(current) : current);
          } else {
            if (isBigInt) {
              delta = BigInt(delta);
              current += delta;
              values.push(Number(current));
            } else {
              current += delta;
              values.push(current);
            }
          }
          offset += typeToBytes[incrType.name];
        }
      }
      for (let i = 0; i < elemCount; i++) {
        const values = result.elems[i];
        const type = elemTypes[i];
        const isBigInt = type === BigInt64Array || type === BigUint64Array;
        for (let idx = 0; idx < count; idx++) {
          if (offset + typeToBytes[type.name] > bundleEnd) {
            throw new Error('Incomplete elem data');
          }
          let value = readTypedValue(view, offset, type);
          values.push(isBigInt ? Number(value) : value);
          offset += typeToBytes[type.name];
        }
      }
      // Skip padding
      while (offset % 4 !== 0) {
        if (offset >= bundleEnd) throw new Error('Incomplete padding after record data');
        offset++;
      }
    }
  }

  return result;
}


function interleaved(binary) {
  if (!(binary instanceof Uint8Array) || binary.length < 4) {
    throw new Error('Invalid binary input');
  }

  const view = new DataView(binary.buffer, binary.byteOffset, binary.byteLength);
  let offset = 0;

  // Initialize result (assuming single i value for simplicity; extend for multiple i values if needed)
  const result = { i: null, j: [], Δelems: [], elems: [] };

  // Process bundles
  while (offset < binary.length) {
    // Read bundle header
    if (offset + 4 > binary.length) throw new Error('Incomplete bundle header');

    const bundleHeader = view.getUint32(offset, true);
    if ((bundleHeader & 0xFF) !== 0x1C) throw new Error('Invalid bundle marker');
    const bundleLength = bundleHeader >>> 8;
    offset += 4;
    const bundleEnd = offset + bundleLength;

    if (bundleEnd > binary.length) throw new Error('Bundle length exceeds input size');

    // Process chunks in bundle
    while (offset < bundleEnd) {
      // Read chunk header
      if (offset + 12 > bundleEnd) throw new Error('Incomplete chunk header');
      const chunkType = view.getUint8(offset);
      if (chunkType !== 0x12) throw new Error(`Unsupported chunk type: ${chunkType}`);
      offset += 1; // Skip chunk type
      offset += 1; // Skip udv
      const count = view.getUint16(offset, true); offset += 2;
      if (count > 65535) throw new Error('Chunk count exceeds 65535');
      const iValue = view.getUint16(offset, true); offset += 2;
      const j0 = view.getUint16(offset, true); offset += 2;
      const Δj = view.getInt16(offset, true); offset += 2;
      const ΔelemCount = view.getUint8(offset++); // Δelem_count
      const elemCount = view.getUint8(offset++); // elem_count

      // Set i value (assuming all chunks share the same i)
      if (result.i === null) result.i = iValue;
      else if (result.i !== iValue) throw new Error('Multiple i values not supported');

      // Read preface (element types)
      const ΔelemTypes = [];
      for (let i = 0; i < ΔelemCount; i++) {
        if (offset >= bundleEnd) throw new Error('Incomplete Δelem types');
        const typeByte = view.getUint8(offset++);
        const baseCode = typeByte & 0x0F;
        const incrCode = typeByte >> 4;
        if (!codeToType[baseCode] || !codeToType[incrCode]) {
          throw new Error(`Invalid type code in Δelem: ${typeByte}`);
        }
        ΔelemTypes.push({ baseType: codeToType[baseCode], incrType: codeToType[incrCode] });
      }
      const elemTypes = [];
      for (let i = 0; i < elemCount; i++) {
        if (offset >= bundleEnd) throw new Error('Incomplete elem types');
        const typeCode = view.getUint8(offset++);
        if (!codeToType[typeCode]) throw new Error(`Invalid type code in elem: ${typeCode}`);
        elemTypes.push(codeToType[typeCode]);
      }

      // Initialize Δelems and elems arrays if first chunk
      if (!result.Δelems.length && ΔelemCount > 0) {
        result.Δelems = Array(ΔelemCount).fill().map(() => []);
      }
      if (!result.elems.length && elemCount > 0) {
        result.elems = Array(elemCount).fill().map(() => []);
      }

      // Read initial values for Δelems
      const initialValues = [];
      for (const { baseType } of ΔelemTypes) {
        if (offset + typeToBytes[baseType.name] > bundleEnd) {
          throw new Error('Incomplete initial values');
        }
        initialValues.push(readTypedValue(view, offset, baseType));
        offset += typeToBytes[baseType.name];
      }
      // Skip padding
      while (offset % 4 !== 0) {
        if (offset >= bundleEnd) throw new Error('Incomplete padding after initial values');
        offset++;
      }

      // Reconstruct j values
      for (let idx = 0; idx < count; idx++) {
        result.j.push(j0 + idx * Δj);
      }

      // Read interleaved record data
      for (let idx = 0; idx < count; idx++) {
        // Read Δelems
        for (let i = 0; i < ΔelemCount; i++) {
          const values = result.Δelems[i];
          const incrType = ΔelemTypes[i].incrType;
          const isBigInt = typeof initialValues[i] === 'bigint';
          if (offset + typeToBytes[incrType.name] > bundleEnd) {
            throw new Error('Incomplete Δelem data');
          }
          let delta = readTypedValue(view, offset, incrType);
          offset += typeToBytes[incrType.name];
          if (idx === 0) {
            values.push(isBigInt ? Number(initialValues[i]) : initialValues[i]);
          } else {
            if (isBigInt) {
              delta = BigInt(delta);
              initialValues[i] += delta;
              values.push(Number(initialValues[i]));
            } else {
              initialValues[i] += delta;
              values.push(initialValues[i]);
            }
          }
        }
        // Read elems
        for (let i = 0; i < elemCount; i++) {
          const values = result.elems[i];
          const type = elemTypes[i];
          const isBigInt = type === BigInt64Array || type === BigUint64Array;
          if (offset + typeToBytes[type.name] > bundleEnd) {
            throw new Error('Incomplete elem data');
          }
          let value = readTypedValue(view, offset, type);
          values.push(isBigInt ? Number(value) : value);
          offset += typeToBytes[type.name];
        }
      }
      // Skip padding
      while (offset % 4 !== 0) {
        if (offset >= bundleEnd) throw new Error('Incomplete padding after record data');
        offset++;
      }
    }
  }

  return result;
}

function readTypedValue(view, offset, type) {
  switch (type) {
    case Int8Array: return view.getInt8(offset);
    case Uint8Array: return view.getUint8(offset);
    case Int16Array: return view.getInt16(offset, true);
    case Uint16Array: return view.getUint16(offset, true);
    case Int32Array: return view.getInt32(offset, true);
    case Uint32Array: return view.getUint32(offset, true);
    case Float32Array: return view.getFloat32(offset, true);
    case Float64Array: return view.getFloat64(offset, true);
    case BigInt64Array: return view.getBigInt64(offset, true);
    case BigUint64Array: return view.getBigUint64(offset, true);
    default: throw new Error(`Unsupported type: ${type.name}`);
  }
}

module.exports = { sequential, interleaved };
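A sketch of the decoder's result shape (not part of the commit; payload is a hypothetical Uint8Array of 0x11 sequential chunks):

const { sequential } = require("./decode");

const { i, j, Δelems, elems } = sequential(payload);
console.log(i);        // the single i value shared by all chunks
console.log(j.length); // one reconstructed j per record
console.log(Δelems);   // one array of reconstructed values per Δelem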
lib/modules/@dougal/binary/encode.js (Normal file, 380 lines)
@@ -0,0 +1,380 @@
const typeToCode = {
  Int8Array: 0,
  Uint8Array: 1,
  Int16Array: 2,
  Uint16Array: 3,
  Int32Array: 4,
  Uint32Array: 5,
  Float32Array: 7, // Float16 not natively supported in JS, use Float32
  Float64Array: 8,
  BigInt64Array: 9,
  BigUint64Array: 10
};

const typeToBytes = {
  Int8Array: 1,
  Uint8Array: 1,
  Int16Array: 2,
  Uint16Array: 2,
  Int32Array: 4,
  Uint32Array: 4,
  Float32Array: 4,
  Float64Array: 8,
  BigInt64Array: 8,
  BigUint64Array: 8
};

function sequential(json, iGetter, jGetter, Δelems = [], elems = [], udv = 0) {
  if (!Array.isArray(json) || !json.length) return new Uint8Array(0);
  if (typeof iGetter !== 'function' || typeof jGetter !== 'function') throw new Error('i and j must be getter functions');
  Δelems.forEach((elem, idx) => {
    if (typeof elem.key !== 'function') throw new Error(`Δelems[${idx}].key must be a getter function`);
  });
  elems.forEach((elem, idx) => {
    if (typeof elem.key !== 'function') throw new Error(`elems[${idx}].key must be a getter function`);
  });

  // Group records by i value
  const groups = new Map();
  for (const record of json) {
    const iValue = iGetter(record);
    if (iValue == null) throw new Error('Missing i value from getter');
    if (!groups.has(iValue)) groups.set(iValue, []);
    groups.get(iValue).push(record);
  }

  const maxBundleSize = 0xFFFFFF; // Max bundle length (24 bits)
  const buffers = [];

  // Process each group (i value)
  for (const [iValue, records] of groups) {
    // Sort records by j to ensure consistent order
    records.sort((a, b) => jGetter(a) - jGetter(b));
    const jValues = records.map(jGetter);
    if (jValues.some(v => v == null)) throw new Error('Missing j value from getter');

    // Split records into chunks based on Δj continuity
    const chunks = [];
    let currentChunk = [records[0]];
    let currentJ0 = jValues[0];
    let currentΔj = records.length > 1 ? jValues[1] - jValues[0] : 0;

    for (let idx = 1; idx < records.length; idx++) {
      const chunkIndex = chunks.reduce((sum, c) => sum + c.records.length, 0);
      const expectedJ = currentJ0 + (idx - chunkIndex) * currentΔj;
      if (jValues[idx] !== expectedJ || idx - chunkIndex >= 65536) {
        chunks.push({ records: currentChunk, j0: currentJ0, Δj: currentΔj });
        currentChunk = [records[idx]];
        currentJ0 = jValues[idx];
        currentΔj = idx + 1 < records.length ? jValues[idx + 1] - jValues[idx] : 0;
      } else {
        currentChunk.push(records[idx]);
      }
    }
    if (currentChunk.length > 0) {
      chunks.push({ records: currentChunk, j0: currentJ0, Δj: currentΔj });
    }

    // Calculate total size for all chunks in this group by simulating offsets
    const chunkSizes = chunks.map(({ records: chunkRecords }) => {
      if (chunkRecords.length > 65535) throw new Error(`Chunk size exceeds 65535 for i=${iValue}`);
      let simulatedOffset = 0; // Relative to chunk start
      simulatedOffset += 12; // Header
      simulatedOffset += Δelems.length + elems.length; // Preface
      simulatedOffset += Δelems.reduce((sum, e) => sum + typeToBytes[e.baseType.name], 0); // Initial values
      while (simulatedOffset % 4 !== 0) simulatedOffset++; // Pad after initial
      simulatedOffset += chunkRecords.length * (
        Δelems.reduce((sum, e) => sum + typeToBytes[e.incrType.name], 0) +
        elems.reduce((sum, e) => sum + typeToBytes[e.type.name], 0)
      ); // Record data
      while (simulatedOffset % 4 !== 0) simulatedOffset++; // Pad after record
      return simulatedOffset;
    });
    const totalChunkSize = chunkSizes.reduce((sum, size) => sum + size, 0);

    // Start a new bundle if needed
    const lastBundle = buffers[buffers.length - 1];
    if (!lastBundle || lastBundle.offset + totalChunkSize > maxBundleSize) {
      buffers.push({ offset: 4, buffer: null, view: null });
    }

    // Initialize DataView for current bundle
    const currentBundle = buffers[buffers.length - 1];
    if (!currentBundle.buffer) {
      const requiredSize = totalChunkSize + 4;
      currentBundle.buffer = new ArrayBuffer(requiredSize);
      currentBundle.view = new DataView(currentBundle.buffer);
    }

    // Process each chunk
    for (const { records: chunkRecords, j0, Δj } of chunks) {
      const chunkSize = chunkSizes.shift();

      // Ensure buffer is large enough
      if (currentBundle.offset + chunkSize > currentBundle.buffer.byteLength) {
        const newSize = currentBundle.offset + chunkSize;
        const newBuffer = new ArrayBuffer(newSize);
        new Uint8Array(newBuffer).set(new Uint8Array(currentBundle.buffer));
        currentBundle.buffer = newBuffer;
        currentBundle.view = new DataView(newBuffer);
      }

      // Write chunk header
      let offset = currentBundle.offset;
      currentBundle.view.setUint8(offset++, 0x11); // Chunk type
      currentBundle.view.setUint8(offset++, udv); // udv
      currentBundle.view.setUint16(offset, chunkRecords.length, true); offset += 2; // count
      currentBundle.view.setUint16(offset, iValue, true); offset += 2; // i
      currentBundle.view.setUint16(offset, j0, true); offset += 2; // j0
      currentBundle.view.setInt16(offset, Δj, true); offset += 2; // Δj
      currentBundle.view.setUint8(offset++, Δelems.length); // Δelem_count
      currentBundle.view.setUint8(offset++, elems.length); // elem_count

      // Write chunk preface (element types)
      for (const elem of Δelems) {
        const baseCode = typeToCode[elem.baseType.name];
        const incrCode = typeToCode[elem.incrType.name];
        currentBundle.view.setUint8(offset++, (incrCode << 4) | baseCode);
      }
      for (const elem of elems) {
        currentBundle.view.setUint8(offset++, typeToCode[elem.type.name]);
      }

      // Write initial values for Δelems
      for (const elem of Δelems) {
        const value = elem.key(chunkRecords[0]);
        if (value == null) throw new Error('Missing Δelem value from getter');
        writeTypedValue(currentBundle.view, offset, value, elem.baseType);
        offset += typeToBytes[elem.baseType.name];
      }
      // Pad to 4-byte boundary
      while (offset % 4 !== 0) currentBundle.view.setUint8(offset++, 0);

      // Write record data (non-interleaved)
      for (const elem of Δelems) {
        let prev = elem.key(chunkRecords[0]);
        for (let idx = 0; idx < chunkRecords.length; idx++) {
          const value = idx === 0 ? 0 : elem.key(chunkRecords[idx]) - prev;
          writeTypedValue(currentBundle.view, offset, value, elem.incrType);
          offset += typeToBytes[elem.incrType.name];
          prev = elem.key(chunkRecords[idx]);
        }
      }
      for (const elem of elems) {
        for (const record of chunkRecords) {
          const value = elem.key(record);
          if (value == null) throw new Error('Missing elem value from getter');
          writeTypedValue(currentBundle.view, offset, value, elem.type);
          offset += typeToBytes[elem.type.name];
        }
      }
      // Pad to 4-byte boundary
      while (offset % 4 !== 0) currentBundle.view.setUint8(offset++, 0);

      // Update bundle offset
      currentBundle.offset = offset;
    }

    // Update bundle header
    currentBundle.view.setUint32(0, 0x1C | ((currentBundle.offset - 4) << 8), true);
  }

  // Combine buffers into final Uint8Array
  const finalLength = buffers.reduce((sum, b) => sum + b.offset, 0);
  const result = new Uint8Array(finalLength);
  let offset = 0;
  for (const { buffer, offset: bundleOffset } of buffers) {
    result.set(new Uint8Array(buffer, 0, bundleOffset), offset);
    offset += bundleOffset;
  }

  return result;
}
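
// Usage sketch (not part of the commit); the records and getters are
// hypothetical, with types drawn from typeToCode above:
//
//   const bytes = sequential(
//     [ { line: 1, point: 101, t: 1000, depth: 7.5 },
//       { line: 1, point: 102, t: 1250, depth: 7.6 } ],
//     r => r.line,   // i getter
//     r => r.point,  // j getter
//     [{ key: r => r.t, baseType: Int32Array, incrType: Int16Array }], // delta-coded
//     [{ key: r => r.depth, type: Float32Array }]                      // stored as-is
//   );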

function interleaved(json, iGetter, jGetter, Δelems = [], elems = [], udv = 0) {
  if (!Array.isArray(json) || !json.length) return new Uint8Array(0);
  if (typeof iGetter !== 'function' || typeof jGetter !== 'function') throw new Error('i and j must be getter functions');
  Δelems.forEach((elem, idx) => {
    if (typeof elem.key !== 'function') throw new Error(`Δelems[${idx}].key must be a getter function`);
  });
  elems.forEach((elem, idx) => {
    if (typeof elem.key !== 'function') throw new Error(`elems[${idx}].key must be a getter function`);
  });

  // Group records by i value
  const groups = new Map();
  for (const record of json) {
    const iValue = iGetter(record);
    if (iValue == null) throw new Error('Missing i value from getter');
    if (!groups.has(iValue)) groups.set(iValue, []);
    groups.get(iValue).push(record);
  }

  const maxBundleSize = 0xFFFFFF; // Max bundle length (24 bits)
  const buffers = [];

  // Process each group (i value)
  for (const [iValue, records] of groups) {
    // Sort records by j to ensure consistent order
    records.sort((a, b) => jGetter(a) - jGetter(b));
    const jValues = records.map(jGetter);
    if (jValues.some(v => v == null)) throw new Error('Missing j value from getter');

    // Split records into chunks based on Δj continuity
    const chunks = [];
    let currentChunk = [records[0]];
    let currentJ0 = jValues[0];
    let currentΔj = records.length > 1 ? jValues[1] - jValues[0] : 0;

    for (let idx = 1; idx < records.length; idx++) {
      const chunkIndex = chunks.reduce((sum, c) => sum + c.records.length, 0);
      const expectedJ = currentJ0 + (idx - chunkIndex) * currentΔj;
      if (jValues[idx] !== expectedJ || idx - chunkIndex >= 65536) {
        chunks.push({ records: currentChunk, j0: currentJ0, Δj: currentΔj });
        currentChunk = [records[idx]];
        currentJ0 = jValues[idx];
        currentΔj = idx + 1 < records.length ? jValues[idx + 1] - jValues[idx] : 0;
      } else {
        currentChunk.push(records[idx]);
      }
    }
    if (currentChunk.length > 0) {
      chunks.push({ records: currentChunk, j0: currentJ0, Δj: currentΔj });
    }

    // Calculate total size for all chunks in this group by simulating offsets
    const chunkSizes = chunks.map(({ records: chunkRecords }) => {
      if (chunkRecords.length > 65535) throw new Error(`Chunk size exceeds 65535 for i=${iValue}`);
      let simulatedOffset = 0; // Relative to chunk start
      simulatedOffset += 12; // Header
      simulatedOffset += Δelems.length + elems.length; // Preface
      simulatedOffset += Δelems.reduce((sum, e) => sum + typeToBytes[e.baseType.name], 0); // Initial values
      while (simulatedOffset % 4 !== 0) simulatedOffset++; // Pad after initial
      simulatedOffset += chunkRecords.length * (
        Δelems.reduce((sum, e) => sum + typeToBytes[e.incrType.name], 0) +
        elems.reduce((sum, e) => sum + typeToBytes[e.type.name], 0)
      ); // Interleaved record data
      while (simulatedOffset % 4 !== 0) simulatedOffset++; // Pad after record
      return simulatedOffset;
    });
    const totalChunkSize = chunkSizes.reduce((sum, size) => sum + size, 0);

    // Start a new bundle if needed
    const lastBundle = buffers[buffers.length - 1];
    if (!lastBundle || lastBundle.offset + totalChunkSize > maxBundleSize) {
      buffers.push({ offset: 4, buffer: null, view: null });
    }

    // Initialize DataView for current bundle
    const currentBundle = buffers[buffers.length - 1];
    if (!currentBundle.buffer) {
      const requiredSize = totalChunkSize + 4;
      currentBundle.buffer = new ArrayBuffer(requiredSize);
      currentBundle.view = new DataView(currentBundle.buffer);
    }

    // Process each chunk
    for (const { records: chunkRecords, j0, Δj } of chunks) {
      const chunkSize = chunkSizes.shift();

      // Ensure buffer is large enough
      if (currentBundle.offset + chunkSize > currentBundle.buffer.byteLength) {
        const newSize = currentBundle.offset + chunkSize;
        const newBuffer = new ArrayBuffer(newSize);
        new Uint8Array(newBuffer).set(new Uint8Array(currentBundle.buffer));
        currentBundle.buffer = newBuffer;
        currentBundle.view = new DataView(newBuffer);
      }

      // Write chunk header
      let offset = currentBundle.offset;
      currentBundle.view.setUint8(offset++, 0x12); // Chunk type
      currentBundle.view.setUint8(offset++, udv); // udv
      currentBundle.view.setUint16(offset, chunkRecords.length, true); offset += 2; // count
      currentBundle.view.setUint16(offset, iValue, true); offset += 2; // i
      currentBundle.view.setUint16(offset, j0, true); offset += 2; // j0
      currentBundle.view.setInt16(offset, Δj, true); offset += 2; // Δj
      currentBundle.view.setUint8(offset++, Δelems.length); // Δelem_count
      currentBundle.view.setUint8(offset++, elems.length); // elem_count

      // Write chunk preface (element types)
      for (const elem of Δelems) {
        const baseCode = typeToCode[elem.baseType.name];
        const incrCode = typeToCode[elem.incrType.name];
        currentBundle.view.setUint8(offset++, (incrCode << 4) | baseCode);
      }
      for (const elem of elems) {
        currentBundle.view.setUint8(offset++, typeToCode[elem.type.name]);
      }

      // Write initial values for Δelems
      for (const elem of Δelems) {
        const value = elem.key(chunkRecords[0]);
        if (value == null) throw new Error('Missing Δelem value from getter');
        writeTypedValue(currentBundle.view, offset, value, elem.baseType);
        offset += typeToBytes[elem.baseType.name];
      }
      // Pad to 4-byte boundary
      while (offset % 4 !== 0) currentBundle.view.setUint8(offset++, 0);

      // Write interleaved record data
      const prevValues = Δelems.map(elem => elem.key(chunkRecords[0]));
|
||||
for (let idx = 0; idx < chunkRecords.length; idx++) {
|
||||
// Write Δelems increments
|
||||
for (let i = 0; i < Δelems.length; i++) {
|
||||
const elem = Δelems[i];
|
||||
const value = idx === 0 ? 0 : elem.key(chunkRecords[idx]) - prevValues[i];
|
||||
writeTypedValue(currentBundle.view, offset, value, elem.incrType);
|
||||
offset += typeToBytes[elem.incrType.name];
|
||||
prevValues[i] = elem.key(chunkRecords[idx]);
|
||||
}
|
||||
// Write elems
|
||||
for (const elem of elems) {
|
||||
const value = elem.key(chunkRecords[idx]);
|
||||
if (value == null) throw new Error('Missing elem value from getter');
|
||||
writeTypedValue(currentBundle.view, offset, value, elem.type);
|
||||
offset += typeToBytes[elem.type.name];
|
||||
}
|
||||
}
|
||||
// Pad to 4-byte boundary
|
||||
while (offset % 4 !== 0) currentBundle.view.setUint8(offset++, 0);
|
||||
|
||||
// Update bundle offset
|
||||
currentBundle.offset = offset;
|
||||
}
|
||||
|
||||
// Update bundle header
|
||||
currentBundle.view.setUint32(0, 0x1C | ((currentBundle.offset - 4) << 8), true);
|
||||
}
|
||||
|
||||
// Combine buffers into final Uint8Array
|
||||
const finalLength = buffers.reduce((sum, b) => sum + b.offset, 0);
|
||||
const result = new Uint8Array(finalLength);
|
||||
let offset = 0;
|
||||
for (const { buffer, offset: bundleOffset } of buffers) {
|
||||
result.set(new Uint8Array(buffer, 0, bundleOffset), offset);
|
||||
offset += bundleOffset;
|
||||
}
|
||||
|
||||
return result;
|
||||
}

function writeTypedValue(view, offset, value, type) {
  switch (type) {
    case Int8Array: view.setInt8(offset, value); break;
    case Uint8Array: view.setUint8(offset, value); break;
    case Int16Array: view.setInt16(offset, value, true); break;
    case Uint16Array: view.setUint16(offset, value, true); break;
    case Int32Array: view.setInt32(offset, value, true); break;
    case Uint32Array: view.setUint32(offset, value, true); break;
    case Float32Array: view.setFloat32(offset, value, true); break;
    case Float64Array: view.setFloat64(offset, value, true); break;
    case BigInt64Array: view.setBigInt64(offset, BigInt(value), true); break;
    case BigUint64Array: view.setBigUint64(offset, BigInt(value), true); break;
    default: throw new Error(`Unsupported type: ${type.name}`);
  }
}

module.exports = { sequential, interleaved };
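decode.js is referenced by index.js below but is not part of this diff. As a rough sketch only, assuming the layout that encode.js writes above, the reading side would look something like this (readTypedValue and parseBundleHeader are illustrative names, not the module's actual API):

    // Inverse of writeTypedValue: fetch one little-endian value of the given type.
    function readTypedValue(view, offset, type) {
      switch (type) {
        case Int8Array: return view.getInt8(offset);
        case Uint8Array: return view.getUint8(offset);
        case Int16Array: return view.getInt16(offset, true);
        case Uint16Array: return view.getUint16(offset, true);
        case Int32Array: return view.getInt32(offset, true);
        case Uint32Array: return view.getUint32(offset, true);
        case Float32Array: return view.getFloat32(offset, true);
        case Float64Array: return view.getFloat64(offset, true);
        case BigInt64Array: return view.getBigInt64(offset, true);
        case BigUint64Array: return view.getBigUint64(offset, true);
        default: throw new Error(`Unsupported type: ${type.name}`);
      }
    }

    // Each bundle starts with a 4-byte little-endian word: 0x1C in the low
    // byte, and the payload length (bundle offset - 4) in the upper 24 bits,
    // matching the setUint32(0, 0x1C | ((offset - 4) << 8), true) call above.
    function parseBundleHeader(view) {
      const word = view.getUint32(0, true);
      return { type: word & 0xFF, length: word >>> 8 };
    }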

139  lib/modules/@dougal/binary/index.js  Normal file
@@ -0,0 +1,139 @@

/** Binary encoder
 *
 * This module encodes scalar data from a grid-like source
 * into a packed binary format for bandwidth efficiency and
 * speed of access.
 *
 * Data are indexed by i & j values, with "i" being constant
 * (e.g., a sequence or line number) and "j" expected to change
 * by a constant, linear amount (e.g., point numbers). All data
 * from consecutive "j" values will be encoded as a single array
 * (or series of arrays if multiple values are encoded).
 * If there is a jump in the "j" progression, a new "chunk" will
 * be started with a new array (or series of arrays).
 *
 * Multiple values may be encoded per (i, j) pair, using any of
 * the types supported by JavaScript's TypedArray except for
 * Float16 and Uint8Clamped. Each variable can be encoded with
 * a different size.
 *
 * Values may be encoded directly or as deltas from an initial
 * value. The latter is particularly efficient when dealing with
 * monotonically incrementing data, such as timestamps.
 *
 * The conceptual packet format for sequentially encoded data
 * looks like this:
 *
 *   <msg-type> <count: x> <i> <j0> <Δj>
 *
 *   <Δelement_count: y>
 *   <element_count: z>
 *
 *   <Δelement_1_type_base> … <Δelement_y_type_base>
 *   <Δelement_1_type_incr> … <Δelement_y_type_incr>
 *   <elem_1_type> … <elem_z_type>
 *
 *   <Δelement_1_first> … <Δelement_y_first>
 *
 *   <Δelem_1_0> … <Δelem_1_x>
 *   …
 *   <Δelem_y_0> … <Δelem_y_x>
 *   <elem_1_0> … <elem_1_x>
 *   …
 *   <elem_z_0> … <elem_z_x>
 *
 *
 * The conceptual packet format for interleaved encoded data
 * looks like this:
 *
 *   <msg-type> <count: x> <i> <j0> <Δj>
 *
 *   <Δelement_count: y>
 *   <element_count: z>
 *
 *   <Δelement_1_type_base> … <Δelement_y_type_base>
 *   <Δelement_1_type_incr> … <Δelement_y_type_incr>
 *   <elem_1_type> … <elem_z_type>
 *
 *   <Δelement_1_first> … <Δelement_y_first>
 *
 *   <Δelem_1_0> <Δelem_2_0> … <Δelem_y_0> <elem_1_0> <elem_2_0> … <elem_z_0>
 *   <Δelem_1_1> <Δelem_2_1> … <Δelem_y_1> <elem_1_1> <elem_2_1> … <elem_z_1>
 *   …
 *   <Δelem_1_x> <Δelem_2_x> … <Δelem_y_x> <elem_1_x> <elem_2_x> … <elem_z_x>
 *
 *
 * Usage example:
 *
 *   json = [
 *     {
 *       sequence: 7,
 *       sailline: 5354,
 *       line: 5356,
 *       point: 1068,
 *       tstamp: 1695448704372,
 *       objrefraw: 3,
 *       objreffinal: 4
 *     },
 *     {
 *       sequence: 7,
 *       sailline: 5354,
 *       line: 5352,
 *       point: 1070,
 *       tstamp: 1695448693612,
 *       objrefraw: 2,
 *       objreffinal: 3
 *     },
 *     {
 *       sequence: 7,
 *       sailline: 5354,
 *       line: 5356,
 *       point: 1072,
 *       tstamp: 1695448684624,
 *       objrefraw: 3,
 *       objreffinal: 4
 *     }
 *   ];
 *
 *   deltas = [
 *     { key: el => el.tstamp, baseType: BigUint64Array, incrType: Int16Array }
 *   ];
 *
 *   elems = [
 *     { key: el => el.objrefraw, type: Uint8Array },
 *     { key: el => el.objreffinal, type: Uint8Array }
 *   ];
 *
 *   i = el => el.sequence;
 *
 *   j = el => el.point;
 *
 *   bundle = encode(json, i, j, deltas, elems);
 *
 *   // bundle:
 *
 *   Uint8Array(40) [
 *      36,   0,   0,  28,  17,   0,   3,   0,   7,   0,
 *      44,   4,   2,   0,   1,   2,  42,   1,   1, 116,
 *      37, 158, 192, 138,   1,   0,   0,   0,   0,   0,
 *     248, 213, 228, 220,   3,   2,   3,   4,   3,   4
 *   ]
 *
 *   decode(bundle);
 *
 *   {
 *     i: 7,
 *     j: [ 1068, 1070, 1072 ],
 *     'Δelems': [ [ 1695448704372, 1695448693612, 1695448684624 ] ],
 *     elems: [ [ 3, 2, 3 ], [ 4, 3, 4 ] ]
 *   }
 *
 */

module.exports = {
  encode: {...require('./encode')},
  decode: {...require('./decode')},
  ...require('./classes')
};
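One note on the exports: `encode` and `decode` are namespace objects, so the doc comment's `encode(json, i, j, deltas, elems)` corresponds in practice to a call such as the following (a sketch under that assumption):

    const { encode } = require('@dougal/binary');
    const bundle = encode.sequential(json, i, j, deltas, elems);
    // or: encode.interleaved(json, i, j, deltas, elems)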

12  lib/modules/@dougal/binary/package.json  Normal file
@@ -0,0 +1,12 @@
{
  "name": "@dougal/binary",
  "version": "1.0.0",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "keywords": [],
  "author": "",
  "license": "ISC",
  "description": ""
}

25  lib/modules/@dougal/concurrency/index.js  Normal file
@@ -0,0 +1,25 @@
/** Simple promise-based concurrency limiter.
 *
 * At most `maxConcurrent` tasks run at once; further calls to
 * `enqueue` wait until a running task settles.
 */
class ConcurrencyLimiter {

  constructor(maxConcurrent) {
    this.maxConcurrent = maxConcurrent;
    this.active = 0;
    this.queue = [];   // resolvers for tasks waiting their turn
  }

  async enqueue(task) {
    // Wait for a slot if we are at capacity
    if (this.active >= this.maxConcurrent) {
      await new Promise(resolve => this.queue.push(resolve));
    }
    this.active++;
    try {
      return await task();
    } finally {
      this.active--;
      // Wake up the next waiter, if any
      if (this.queue.length > 0) {
        this.queue.shift()();
      }
    }
  }
}

module.exports = ConcurrencyLimiter;
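A sketch of typical use, capping parallel fetches (the `urls` array is illustrative):

    const ConcurrencyLimiter = require('@dougal/concurrency');
    const limiter = new ConcurrencyLimiter(4); // at most 4 tasks in flight

    const results = await Promise.all(
      urls.map(url => limiter.enqueue(() => fetch(url).then(r => r.json())))
    );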

12  lib/modules/@dougal/concurrency/package.json  Normal file
@@ -0,0 +1,12 @@
{
  "name": "@dougal/concurrency",
  "version": "1.0.0",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "keywords": [],
  "author": "",
  "license": "ISC",
  "description": ""
}

75  lib/modules/@dougal/organisations/Organisation.js  Normal file
@@ -0,0 +1,75 @@

class Organisation {

  constructor (data) {

    this.read = !!data?.read;
    this.write = !!data?.write;
    this.edit = !!data?.edit;

    this.other = {};

    return new Proxy(this, {
      get (target, prop) {
        if (prop in target) {
          return target[prop];
        } else {
          return target.other[prop];
        }
      },

      set (target, prop, value) {
        const newValue = Boolean(value);

        if (["read", "write", "edit"].includes(prop)) {
          target[prop] = newValue;
        } else {
          target.other[prop] = newValue;
        }

        return true;
      }
    });
  }

  toJSON () {
    return {
      read: this.read,
      write: this.write,
      edit: this.edit,
      ...this.other
    }
  }

  toString (replacer, space) {
    return JSON.stringify(this.toJSON(), replacer, space);
  }

  /** Limit the operations to only those allowed by `other`
   */
  filter (other) {
    const filteredOrganisation = new Organisation();

    filteredOrganisation.read = this.read && other.read;
    filteredOrganisation.write = this.write && other.write;
    filteredOrganisation.edit = this.edit && other.edit;

    return filteredOrganisation;
  }

  intersect (other) {
    return this.filter(other);
  }

}


if (typeof module !== 'undefined' && module.exports) {
  module.exports = Organisation; // CJS export
}

// ESM export
if (typeof exports !== 'undefined' && !exports.default) {
  exports.default = Organisation;
}
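Because the constructor returns a Proxy, arbitrary operation names can be toggled; unknown names are kept in `other` and survive serialisation (a sketch):

    const org = new Organisation({ read: true });
    org.write = 1;       // coerced to boolean
    org.approve = true;  // unknown operation: stored in org.other
    org.toString();      // '{"read":true,"write":true,"edit":false,"approve":true}'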

225  lib/modules/@dougal/organisations/Organisations.js  Normal file
@@ -0,0 +1,225 @@
const Organisation = require('./Organisation');

class Organisations {

  #values = {}

  #overlord

  static entries (orgs) {
    return orgs.names().map(name => [name, orgs.get(name)]);
  }

  constructor (data, overlord) {
    if (data instanceof Organisations) {
      for (const [name, value] of Organisations.entries(data)) {
        this.set(name, new Organisation(value));
      }
    } else if (typeof data === "string" || data instanceof String) {
      // Must be checked before the Object branch: String objects
      // are also instanceof Object
      this.set(data, new Organisation());
    } else if (data instanceof Object) {
      for (const [name, value] of Object.entries(data)) {
        this.set(name, new Organisation(value));
      }
    } else if (typeof data !== "undefined") {
      throw new Error("Invalid constructor argument");
    }

    if (overlord) {
      this.#overlord = overlord;
    }
  }

  get values () {
    return this.#values;
  }

  get length () {
    return this.names().length;
  }

  get overlord () {
    return this.#overlord;
  }

  set overlord (v) {
    this.#overlord = new Organisations(v);
  }

  /** Get the operations for `name` (case-insensitive lookup)
   */
  get (name) {
    const key = Object.keys(this.values).find( k => k.toLowerCase() == String(name).toLowerCase() ) ?? name;
    return this.values[key];
  }

  /** Set the operations for `name` to `value`
   *
   * If we have an overlord, ensure we cannot:
   *
   * 1. Add new organisations which the overlord
   *    is not a member of
   * 2. Access operations that the overlord is not
   *    allowed to access
   */
  set (name, value) {
    name = String(name).trim();
    const key = Object.keys(this.values).find( k => k.toLowerCase() == name.toLowerCase() ) ?? name;
    const org = new Organisation(value);

    if (this.overlord) {
      const parent = this.overlord.get(key) ?? this.overlord.get("*");
      if (parent) {
        this.values[key] = parent.filter(org);
      }
    } else {
      this.values[key] = org;
    }

    return this;
  }

  /** Enable the operation `op` in all organisations
   */
  enableOperation (op) {
    if (this.overlord) {
      Object.keys(this.#values)
        .filter( key => (this.overlord.get(key) ?? this.overlord.get("*"))?.[op] )
        .forEach( key => this.#values[key][op] = true );
    } else {
      Object.values(this.#values).forEach( org => org[op] = true );
    }

    return this;
  }

  /** Disable the operation `op` in all organisations
   */
  disableOperation (op) {
    Object.values(this.#values).forEach( org => org[op] = false );

    return this;
  }

  /** Create a new organisations object limited by the caller's rights
   *
   * The spawned Organisations instance will have the same organisations
   * and rights as the caller minus the applied `mask`. With the default
   * mask, the spawned object will inherit all rights except for `edit`
   * rights.
   *
   * The "*" organisation must be explicitly assigned. It is not inherited.
   */
  spawn (mask = {read: true, write: true, edit: false}) {

    const parent = new Organisations();
    const wildcard = this.get("*")?.edit; // If true, we can spawn everywhere

    this.entries().forEach( ([k, v]) => {
      // if (k != "*") { // This organisation is not inherited
      if (v.edit || wildcard) { // We have the right to spawn in this organisation
        const o = new Organisation({
          read: v.read && mask.read,
          write: v.write && mask.write,
          edit: v.edit && mask.edit
        });
        parent.set(k, o);
      }
      // }
    });

    return new Organisations({}, parent);
  }

  remove (name) {
    const key = Object.keys(this.values).find( k => k.toLowerCase() == String(name).toLowerCase() ) ?? name;
    delete this.values[key];
  }

  /** Return the list of organisation names
   */
  names () {
    return Object.keys(this.values);
  }

  /** Direct lookup of `name`; unlike `get`, this is case-sensitive
   */
  value (name) {
    return this.values[name];
  }

  /** Like Object.entries over the underlying values
   */
  entries () {
    return this.names().map( name => [ name, this.value(name) ] );
  }

  /** Return true if the named organisation is present
   */
  has (name) {
    return Boolean(this.value(name));
  }

  /** Return only those of our organisations
   * and operations present in `other`
   */
  filter (other) {
    const filteredOrganisations = new Organisations();

    const wildcard = other.value("*");

    for (const [name, org] of this.entries()) {
      const ownOrg = other.value(name) ?? wildcard;
      if (ownOrg) {
        filteredOrganisations.set(name, org.filter(ownOrg));
      }
    }

    return filteredOrganisations;
  }

  /** Return only those organisations
   * that have access to the required
   * operation
   */
  accessToOperation (op) {
    const filteredOrganisations = new Organisations();

    for (const [name, org] of this.entries()) {
      if (org[op]) {
        filteredOrganisations.set(name, org);
      }
    }

    return filteredOrganisations;
  }

  toJSON () {
    const obj = {};
    for (const key in this.values) {
      obj[key] = this.values[key].toJSON();
    }
    return obj;
  }

  toString (replacer, space) {
    return JSON.stringify(this.toJSON(), replacer, space);
  }

  *[Symbol.iterator] () {
    for (const [name, operations] of this.entries()) {
      yield {name, operations};
    }
  }

}


if (typeof module !== 'undefined' && module.exports) {
  module.exports = Organisations; // CJS export
}

// ESM export
if (typeof exports !== 'undefined' && !exports.default) {
  exports.default = Organisations;
}
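How the overlord cap plays out in `set` (a sketch; the organisation names are illustrative):

    const parent = new Organisations({ acme: { read: true, write: true, edit: false } });
    const child = new Organisations({}, parent);

    child.set('acme', { read: true, write: true, edit: true });
    child.get('acme').toString();  // '{"read":true,"write":true,"edit":false}' (edit filtered out)

    child.set('globex', { read: true });  // silently dropped: not in the overlord
    child.has('globex');                  // false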

5  lib/modules/@dougal/organisations/index.js  Normal file
@@ -0,0 +1,5 @@

module.exports = {
  Organisation: require('./Organisation'),
  Organisations: require('./Organisations')
}

12  lib/modules/@dougal/organisations/package.json  Normal file
@@ -0,0 +1,12 @@
{
  "name": "@dougal/organisations",
  "version": "1.0.0",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "keywords": [],
  "author": "",
  "license": "ISC",
  "description": ""
}

364  lib/modules/@dougal/user/User.js  Normal file
@@ -0,0 +1,364 @@
const EventEmitter = require('events');
const { Organisations } = require('@dougal/organisations');

function randomUUID () {
  return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
    const r = Math.random() * 16 | 0;
    const v = c === 'x' ? r : (r & 0x3 | 0x8);
    return v.toString(16);
  });
}

class User extends EventEmitter {

  // Valid field names
  static fields = [ "ip", "host", "name", "email", "description", "colour", "active", "organisations", "meta" ]

  static validUUID (str) {
    const uuidv4Rx = /^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i;
    return uuidv4Rx.test(str);
  }

  static validIPv4 (str) {
    const ipv4Rx = /^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\/([0-9]|[1-2][0-9]|3[0-2]))?$/;
    return ipv4Rx.test(str);
  }

  static validIPv6 (str) {
    const ipv6Rx = /^(?:[0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?::[0-9a-fA-F]{1,4}){1,6}|:((?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4][0-9]|[01]?[0-9][0-9]?))|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4][0-9]|[01]?[0-9][0-9]?))))$/;
    return ipv6Rx.test(str);
  }

  static validHostname (str) {
    const hostnameRx = /^(?=.{1,253}$)(?:(?!-)[A-Za-z0-9-]{1,63}(?<!-)\.)+[A-Za-z]{2,}$/;
    return hostnameRx.test(str);
  }

  #setString (k, v) {
    if (typeof v === "undefined") {
      this.values[k] = v;
    } else {
      this.values[k] = String(v).trim();
    }
    this.emit("changed", k, v);
    this.#updateTimestamp();
  }

  #updateTimestamp (v) {
    if (typeof v === "undefined") {
      this.#timestamp = (new Date()).valueOf();
    } else {
      this.#timestamp = (new Date(v)).valueOf();
    }
    this.emit("last_modified", this.#timestamp);
  }

  // Create a new instance of `other`, where `other` is
  // an instance of User or of a derived class
  #clone (other = this) {
    const clone = new this.constructor();
    Object.assign(clone.values, other.values);
    clone.organisations = new Organisations(other.organisations);
    return clone;
  }

  values = {}

  #timestamp

  constructor (data) {
    super();

    User.fields.forEach( f => this[f] = data?.[f] );
    this.values.id = data?.id ?? randomUUID();
    this.values.active = !!this.active;
    this.values.hash = data?.hash;
    this.values.password = data?.password;
    this.values.organisations = new Organisations(data?.organisations);
    this.#updateTimestamp(data?.last_modified);
  }

  /*
   * Getters
   */

  get id () { return this.values.id }

  get ip () { return this.values.ip }

  get host () { return this.values.host }

  get name () { return this.values.name }

  get email () { return this.values.email }

  get description () { return this.values.description }

  get colour () { return this.values.colour }

  get active () { return this.values.active }

  get organisations () { return this.values.organisations }

  get password () { return this.values.password }

  get timestamp () { return new Date(this.#timestamp) }

  /*
   * Setters
   */

  set id (v) {
    if (typeof v === "undefined") {
      this.values.id = randomUUID();
    } else if (User.validUUID(v)) {
      this.values.id = v;
    } else {
      throw new Error("Invalid ID format (must be UUIDv4)");
    }
    this.emit("changed", "id", this.values.id);
    this.#updateTimestamp();
  }

  set ip (v) {
    if (User.validIPv4(v) || User.validIPv6(v) || typeof v === "undefined") {
      this.values.ip = v;
    } else {
      throw new Error("Invalid IP address or subnet");
    }
    this.emit("changed", "ip", this.values.ip);
    this.#updateTimestamp();
  }

  set host (v) {
    if (User.validHostname(v) || typeof v === "undefined") {
      this.values.host = v;
    } else {
      throw new Error("Invalid hostname");
    }
    this.emit("changed", "host", this.values.host);
    this.#updateTimestamp();
  }

  set name (v) {
    this.#setString("name", v);
  }

  set email (v) {
    // TODO should validate, but hey!
    this.#setString("email", v);
  }

  set description (v) {
    this.#setString("description", v);
  }

  set colour (v) {
    this.#setString("colour", v);
  }

  set active (v) {
    this.values.active = !!v;
    this.emit("changed", "active", this.values.active);
    this.#updateTimestamp();
  }

  set organisations (v) {
    this.values.organisations = new Organisations(v);
    this.emit("changed", "organisations", this.values.organisations);
    this.#updateTimestamp();
  }

  set password (v) {
    this.values.password = v;
    this.emit("changed", "password", this.values.password);
    this.#updateTimestamp();
  }


  /*
   * Validation methods
   */

  get errors () {
    let err = [];

    if (!this.id) err.push("ERR_NO_ID");
    if (!this.name) err.push("ERR_NO_NAME");
    if (!this.organisations.length) err.push("ERR_NO_ORG");

    return err;
  }

  get isValid () {
    return this.errors.length == 0;
  }

  /*
   * Filtering methods
   */

  filter (other) {
    const filteredUser = this.#clone();
    filteredUser.organisations = this.organisations.filter(other.organisations);
    return filteredUser;
  }

  /** Return users that are visible to me.
   *
   * These are users with whom we share at least one
   * organisation over which we have read access.
   *
   * If we are wildcarded ("*"), we see everyone.
   *
   * If a peer is wildcarded, they can be seen by everyone.
   */
  peers (list) {
    if (this.organisations.value("*")) {
      return list;
    } else {
      return list.filter( user => this.canRead(user) );
    }
  }

  /** Return users that I can edit
   *
   * These users must belong to an organisation
   * over which I have edit rights.
   *
   * If we are edit wildcarded, we can edit everyone.
   */
  editablePeers (list) {
    const editableOrgs = this.organisations.accessToOperation("edit");
    if (editableOrgs.value("*")) {
      return list;
    } else {
      return list.filter( user => this.canEdit(user) );
    }
  }

  /*
   * General methods
   */

  /** Return `true` if we are `other`
   */
  is (other) {
    return this.id == other.id;
  }

  canDo (operation, other) {
    if (this.organisations.get('*')?.[operation])
      return true;

    if (other instanceof User) {
      return other.organisations.names().some(name => this.organisations.get(name)?.[operation]);
    } else if (other instanceof Organisations) {
      return other.accessToOperation(operation).names().some(name => this.organisations.get(name)?.[operation]);
    } else if (other?.organisations) {
      return this.canDo(operation, new Organisations(other.organisations));
    } else if (other instanceof Object) {
      return this.canDo(operation, new Organisations(other));
    }
    return false;
  }

  canRead (other) {
    return this.canDo("read", other);
  }

  canWrite (other) {
    return this.canDo("write", other);
  }

  canEdit (other) {
    return this.canDo("edit", other);
  }

  /** Perform an edit on another user
   *
   * Syntax: user.edit(other).to(another);
   *
   * Applies to `other` the changes described in `another`
   * that are permitted to `user`. The argument `another`
   * must be a plain object (not a `User` instance) with
   * only the properties that are to be changed.
   *
   * NOTE: Organisations are not merged, they are overwritten
   * and then filtered to ensure that the edited user does not
   * gain more privileges than those granted to the editing
   * user.
   *
   * Example:
   *
   *   // This causes user test77 to set user x23 to
   *   // inactive
   *   test77.edit(x23).to({active: false})
   */
  edit (other) {
    if (this.canEdit(other)) {
      return {
        to: (another) => {
          const newUser = Object.assign(this.#clone(other), another);
          return newUser.filter(this);
        }
      }
    }
    // Do not fail or throw but return undefined
  }

  /** Create a new user similar to us, except that it doesn't
   * have `edit` rights by default
   */
  spawn (init = {}, mask = {read: true, write: true, edit: false}) {
    const user = this.#clone(init);
    user.organisations = this.organisations.accessToOperation("edit").disableOperation("edit");
    user.organisations.overlord = this.organisations;
    return user;
  }


  /*
   * Conversion and presentation methods
   */

  toJSON () {
    return {
      id: this.id,
      ip: this.ip,
      host: this.host,
      name: this.name,
      email: this.email,
      description: this.description,
      colour: this.colour,
      active: this.active,
      organisations: this.organisations.toJSON(),
      password: this.password
    }
  }

  toString (replacer, space) {
    return JSON.stringify(this.toJSON(), replacer, space);
  }

}


if (typeof module !== 'undefined' && module.exports) {
  module.exports = User; // CJS export
}

// ESM export
if (typeof exports !== 'undefined' && !exports.default) {
  exports.default = User;
}
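End to end, the permission model reads like this (a sketch; the names and organisations are illustrative):

    const { User } = require('@dougal/user');

    const admin = new User({ name: 'admin', organisations: { acme: { read: true, write: true, edit: true } } });
    const bob   = new User({ name: 'bob',   organisations: { acme: { read: true } } });

    admin.canEdit(bob);                              // true: shared org with edit rights
    const edited = admin.edit(bob).to({ active: false }); // filtered copy of bob, now inactive
    bob.canEdit(admin);                              // false: no edit rights over acme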

4  lib/modules/@dougal/user/index.js  Normal file
@@ -0,0 +1,4 @@

module.exports = {
  User: require('./User')
}

15  lib/modules/@dougal/user/package.json  Normal file
@@ -0,0 +1,15 @@
{
  "name": "@dougal/user",
  "version": "1.0.0",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "keywords": [],
  "author": "",
  "license": "ISC",
  "description": "",
  "dependencies": {
    "@dougal/organisations": "file:../organisations"
  }
}

@@ -1,5 +1,9 @@
 module.exports = {
   presets: [
     '@vue/cli-plugin-babel/preset'
-  ]
+  ],
+  plugins: [
+    '@babel/plugin-proposal-logical-assignment-operators',
+    '@babel/plugin-transform-private-methods'
+  ]
 }
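The hunk above (its file path was not preserved in this extract, but the content is the client's Babel configuration) lines up with the new modules: `@babel/plugin-transform-private-methods` is needed because `Organisations` and `User` use `#`-private fields and methods (`#values`, `#overlord`, `#clone`, `#setString`), which the client build must transpile; `@babel/plugin-proposal-logical-assignment-operators` presumably covers `??=`-style assignments elsewhere in the client code not shown here.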
10924  lib/www/client/source/package-lock.json  generated
File diff suppressed because it is too large
@@ -3,30 +3,49 @@
   "version": "0.0.0",
   "private": true,
   "scripts": {
-    "serve": "vue-cli-service serve",
+    "serve": "vue-cli-service serve --host=0.0.0.0",
     "build": "vue-cli-service build"
   },
   "dependencies": {
-    "@mdi/font": "^5.6.55",
+    "@deck.gl/aggregation-layers": "^9.1.13",
+    "@deck.gl/geo-layers": "^9.1.13",
+    "@deck.gl/mesh-layers": "^9.1.14",
+    "@dougal/binary": "file:../../../modules/@dougal/binary",
+    "@dougal/concurrency": "file:../../../modules/@dougal/concurrency",
+    "@dougal/organisations": "file:../../../modules/@dougal/organisations",
+    "@dougal/user": "file:../../../modules/@dougal/user",
+    "@loaders.gl/obj": "^4.3.4",
+    "@mdi/font": "^7.2.96",
+    "buffer": "^6.0.3",
     "core-js": "^3.6.5",
+    "csv-parse": "^5.5.2",
     "d3": "^7.0.1",
     "jwt-decode": "^3.0.0",
     "leaflet": "^1.7.1",
     "leaflet-arrowheads": "^1.2.2",
     "leaflet-realtime": "^2.2.0",
     "leaflet.markercluster": "^1.4.1",
     "lodash.debounce": "^4.0.8",
+    "marked": "^9.1.4",
+    "path-browserify": "^1.0.1",
+    "plotly.js-dist": "^2.27.0",
     "suncalc": "^1.8.0",
     "typeface-roboto": "0.0.75",
     "vue": "^2.6.12",
-    "vue-debounce": "^2.5.7",
-    "vue-router": "^3.4.5",
-    "vuetify": "^2.3.12",
-    "vuex": "^3.5.1"
+    "vue-debounce": "^2.6.0",
+    "vue-router": "^3.5.1",
+    "vuetify": "^2.5.0",
+    "vuex": "^3.6.2",
+    "yaml": "^2.3.4"
   },
   "devDependencies": {
-    "@vue/cli-plugin-babel": "~4.4.0",
-    "@vue/cli-plugin-router": "~4.4.0",
-    "@vue/cli-plugin-vuex": "~4.4.0",
-    "@vue/cli-service": "~4.4.0",
-    "sass": "^1.26.11",
+    "@babel/plugin-proposal-logical-assignment-operators": "^7.14.5",
+    "@babel/plugin-transform-private-methods": "^7.27.1",
+    "@vue/cli-plugin-babel": "^5.0.8",
+    "@vue/cli-plugin-router": "^5.0.8",
+    "@vue/cli-plugin-vuex": "^5.0.8",
+    "@vue/cli-service": "^5.0.8",
+    "sass": "~1.32",
+    "sass-loader": "^8.0.0",
+    "stylus": "^0.54.8",
+    "stylus-loader": "^3.0.2",
406982  lib/www/client/source/public/assets/boat0.obj  Normal file
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff.