Compare commits


2 Commits

Author SHA1 Message Date
D. Berge
1f8157cc5a Add client-side RTC logic.
It tries to grab the microphone and open a
connection to every other user as soon as the
websocket connection is made. It shows audio
controls for every connection at the bottom
of the page for debugging purposes.
2020-10-10 12:07:31 +02:00
D. Berge
b72b55c6dd Add server-side RTC signalling.
Basic implementation. It assigns a random ID
to every connection and passes details of every
connection to everyone else.
2020-10-10 12:05:49 +02:00
606 changed files with 16523 additions and 482111 deletions

3  .gitignore vendored
View File

@@ -10,6 +10,3 @@ lib/www/client/source/dist/
lib/www/client/dist/
etc/surveys/*.yaml
!etc/surveys/_*.yaml
etc/ssl/*
etc/config.yaml
var/*

View File

@@ -1,27 +0,0 @@
#!/usr/bin/python3
"""
Check if any of the directories provided in the imports.mounts configuration
section are empty.
Exits with 0 if all directories are non-empty, 1 otherwise. It stops at the first
empty directory.
"""
import os
import configuration
cfg = configuration.read()
if cfg and "imports" in cfg and "mounts" in cfg["imports"]:
mounts = cfg["imports"]["mounts"]
for item in mounts:
with os.scandir(item) as contents:
if not any(contents):
exit(1)
else:
print("No mounts in configuration")
exit(0)

View File

@@ -1,5 +1,4 @@
import os
import pathlib
from glob import glob
from yaml import full_load as _load
@@ -12,18 +11,6 @@ surveys should be under $HOME/etc/surveys/*.yaml. In both cases,
$HOME is the home directory of the user running this script.
"""
def is_relative_to(it, other):
"""
is_relative_to() is not present before Python 3.9, so we
need this kludge to get Dougal to run on OpenSUSE 15.4
"""
if "is_relative_to" in dir(it):
return it.is_relative_to(other)
return str(it.absolute()).startswith(str(other.absolute()))
prefix = os.environ.get("DOUGAL_ROOT", os.environ.get("HOME", ".")+"/software")
DOUGAL_ROOT = os.environ.get("DOUGAL_ROOT", os.environ.get("HOME", ".")+"/software")
@@ -67,10 +54,6 @@ def files (globspec = None, include_archived = False):
quickly and temporarily “disabling” a survey configuration by renaming
the relevant file.
"""
print("This method is obsolete")
return
tuples = []
if globspec is None:
@@ -104,73 +87,3 @@ def rxflags (flagstr):
for flag in flagstr:
flags |= cases.get(flag, 0)
return flags
def translate_path (file):
"""
Translate a path from a Dougal import directory to an actual
physical path on disk.
Any user files accessible by Dougal must be under a path prefixed
by `(config.yaml).imports.paths`. The value of `imports.paths` may
be either a string, in which case this represents the prefix under
which all Dougal data resides, or a dictionary where the keys are
logical paths and their values are the corresponding physical paths.
"""
cfg = read()
root = pathlib.Path(DOUGAL_ROOT)
filepath = pathlib.Path(file).resolve()
import_paths = cfg["imports"]["paths"]
if filepath.is_absolute():
if type(import_paths) == str:
# Substitute the root for the real physical path
# NOTE: `root` deals with import_paths not being absolute
prefix = root.joinpath(pathlib.Path(import_paths)).resolve()
return str(pathlib.Path(prefix).joinpath(*filepath.parts[2:]))
else:
# Look for a match on the second path element
if filepath.parts[1] in import_paths:
# NOTE: `root` deals with import_paths[…] not being absolute
prefix = root.joinpath(import_paths[filepath.parts[1]])
return str(pathlib.Path(prefix).joinpath(*filepath.parts[2:]))
else:
# This path is invalid
raise TypeError("invalid path or file: {0!r}".format(filepath))
else:
# A relative filepath is always resolved relative to the logical root
root = pathlib.Path("/")
return translate_path(root.joinpath(filepath))
def untranslate_path (file):
"""
Attempt to convert a physical path into a logical one.
See `translate_path()` above for details.
"""
cfg = read()
dougal_root = pathlib.Path(DOUGAL_ROOT)
filepath = pathlib.Path(file).resolve()
import_paths = cfg["imports"]["paths"]
physical_root = pathlib.Path("/")
if filepath.is_absolute():
if type(import_paths) == str:
if is_relative_to(filepath, import_paths):
physical_root = pathlib.Path("/")
physical_prefix = pathlib.Path(import_paths)
return str(root.joinpath(filepath.relative_to(physical_prefix)))
else:
raise TypeError("invalid path or file: {0!r}".format(filepath))
else:
for key, value in import_paths.items():
value = dougal_root.joinpath(value)
physical_prefix = pathlib.Path(value)
if is_relative_to(filepath, physical_prefix):
logical_prefix = physical_root.joinpath(pathlib.Path(key)).resolve()
return str(logical_prefix.joinpath(filepath.relative_to(physical_prefix)))
# If we got here with no matches, this is not a valid
# Dougal data path
raise TypeError("invalid path or file: {0!r}".format(filepath))
else:
# A relative filepath is always resolved relative to DOUGAL_ROOT
return untranslate_path(root.joinpath(filepath))
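# Illustrative sketch (not part of this module): the logical-to-physical mapping
# that translate_path() documents, assuming a hypothetical dict-style
# imports.paths such as {"preplots": "/data/survey/preplots"}.
import pathlib
import_paths = {"preplots": "/data/survey/preplots"}  # assumed example value
logical = pathlib.PurePosixPath("/preplots/block_a/lines.sps")
prefix = import_paths[logical.parts[1]]  # match on the second path element
physical = pathlib.PurePosixPath(prefix).joinpath(*logical.parts[2:])
print(physical)  # /data/survey/preplots/block_a/lines.sps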

View File

@@ -10,7 +10,7 @@
# be known to the database.
# * PROJECT_NAME is a more descriptive name for human consumption.
# * EPSG_CODE is the EPSG code identifying the CRS for the grid data in the
# navigation files, e.g., 23031.
# navigation files, e.g., 32031.
#
# In addition to this, certain other parameters may be controlled via
# environment variables:

View File

@@ -1,26 +0,0 @@
#!/usr/bin/python3
"""
Do daily housekeeping on the database.
This is meant to run shortly after midnight every day.
"""
import configuration
from datastore import Datastore
if __name__ == '__main__':
print("Connecting to database")
db = Datastore()
surveys = db.surveys()
print("Reading surveys")
for survey in surveys:
print(f'Survey: {survey["id"]} ({survey["schema"]})')
db.set_survey(survey["schema"])
print("Daily tasks")
db.run_daily_tasks()
print("Done")

View File

@@ -4,7 +4,6 @@ import psycopg2
import configuration
import preplots
import p111
from hashlib import md5 # Because it's good enough
"""
Interface to the PostgreSQL database.
@@ -12,16 +11,13 @@ Interface to the PostgreSQL database.
def file_hash(file):
"""
Calculate a file hash based on its name, size, modification and creation times.
Calculate a file hash based on its size, inode, modification and creation times.
The hash is used to uniquely identify files in the database and detect if they
have changed.
"""
h = md5()
h.update(file.encode())
name_digest = h.hexdigest()[:16]
st = os.stat(file)
return ":".join([str(v) for v in [st.st_size, st.st_mtime, st.st_ctime, name_digest]])
return ":".join([str(v) for v in [st.st_size, st.st_mtime, st.st_ctime, st.st_ino]])
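# Illustration (assumed values): for a file of 2048 bytes with mtime and ctime
# 1602324449.0 and inode 131077, the new file_hash() returns
# "2048:1602324449.0:1602324449.0:131077".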
class Datastore:
"""
@@ -52,7 +48,7 @@ class Datastore:
self.conn = psycopg2.connect(configuration.read()["db"]["connection_string"], **opts)
def set_autocommit(self, value = True):
def set_autocommit(value = True):
"""
Enable or disable autocommit.
@@ -95,7 +91,7 @@ class Datastore:
cursor.execute(qry, (filepath,))
results = cursor.fetchall()
if len(results):
return (filepath, file_hash(configuration.translate_path(filepath))) in results
return (filepath, file_hash(filepath)) in results
def add_file(self, path, cursor = None):
@@ -107,8 +103,7 @@ class Datastore:
else:
cur = cursor
realpath = configuration.translate_path(path)
hash = file_hash(realpath)
hash = file_hash(path)
qry = "CALL add_file(%s, %s);"
cur.execute(qry, (path, hash))
if cursor is None:
@@ -177,7 +172,7 @@ class Datastore:
else:
cur = cursor
hash = file_hash(configuration.translate_path(path))
hash = file_hash(path)
qry = """
UPDATE raw_lines rl
SET ntbp = %s
@@ -256,78 +251,6 @@ class Datastore:
self.maybe_commit()
def save_preplot_line_info(self, lines, filepath, filedata = None):
"""
Save preplot line information
Arguments:
lines (iterable): should be a collection of lines returned from
one of the line info reading functions (see preplots.py).
filepath (string): the full path to the preplot file from where the lines
have been read. It will be added to the survey's `file` table so that
it can be monitored for changes.
"""
with self.conn.cursor() as cursor:
cursor.execute("BEGIN;")
# Check which preplot lines we actually have already imported,
# as the line info file may contain extra lines.
qry = """
SELECT line, class
FROM preplot_lines
ORDER BY line, class;
"""
cursor.execute(qry)
preplot_lines = cursor.fetchall()
hash = self.add_file(filepath, cursor)
count=0
for line in lines:
count += 1
if not (line["sail_line"], "V") in preplot_lines:
print(f"\u001b[2KSkipping line {count} / {len(lines)}", end="\n", flush=True)
continue
print(f"\u001b[2KSaving line {count} / {len(lines)} ", end="\n", flush=True)
sail_line = line["sail_line"]
incr = line.get("incr", True)
ntba = line.get("ntba", False)
remarks = line.get("remarks", None)
meta = json.dumps(line.get("meta", {}))
source_lines = line.get("source_line", [])
for source_line in source_lines:
qry = """
INSERT INTO preplot_saillines AS ps
(sailline, line, sailline_class, line_class, incr, ntba, remarks, meta, hash)
VALUES
(%s, %s, 'V', 'S', %s, %s, %s, %s, %s)
ON CONFLICT (sailline, sailline_class, line, line_class, incr) DO UPDATE
SET
incr = EXCLUDED.incr,
ntba = EXCLUDED.ntba,
remarks = COALESCE(EXCLUDED.remarks, ps.remarks),
meta = ps.meta || EXCLUDED.meta,
hash = EXCLUDED.hash;
"""
# NOTE Consider using cursor.executemany() instead. Then again,
# we're only expecting a few hundred lines at most.
cursor.execute(qry, (sail_line, source_line, incr, ntba, remarks, meta, hash))
if filedata is not None:
self.save_file_data(filepath, json.dumps(filedata), cursor)
self.maybe_commit()
def save_raw_p190(self, records, fileinfo, filepath, epsg = 0, filedata = None, ntbp = False):
"""
Save raw P1 data.
@@ -467,40 +390,20 @@ class Datastore:
with self.conn.cursor() as cursor:
cursor.execute("BEGIN;")
hash = self.add_file(filepath, cursor)
if not records or len(records) == 0:
print("File has no records (or none have been detected)")
# We add the file to the database anyway to signal that we have
# actually seen it.
self.maybe_commit()
return
incr = p111.point_number(records[0]) <= p111.point_number(records[-1])
# Start by deleting any online data we may have for this sequence
self.del_hash("*online*", cursor)
qry = """
INSERT INTO raw_lines (sequence, line, remarks, ntbp, incr, meta)
VALUES (%s, %s, '', %s, %s, %s)
ON CONFLICT (sequence) DO UPDATE SET
line = EXCLUDED.line,
ntbp = EXCLUDED.ntbp,
incr = EXCLUDED.incr,
meta = EXCLUDED.meta;
INSERT INTO raw_lines (sequence, line, remarks, ntbp, incr)
VALUES (%s, %s, '', %s, %s)
ON CONFLICT DO NOTHING;
"""
cursor.execute(qry, (fileinfo["sequence"], fileinfo["line"], ntbp, incr, json.dumps(fileinfo["meta"])))
qry = """
UPDATE raw_lines
SET meta = meta || %s
WHERE sequence = %s;
"""
cursor.execute(qry, (json.dumps(fileinfo["meta"]), fileinfo["sequence"]))
cursor.execute(qry, (fileinfo["sequence"], fileinfo["line"], ntbp, incr))
qry = """
INSERT INTO raw_lines_files (sequence, hash)
@@ -533,26 +436,16 @@ class Datastore:
with self.conn.cursor() as cursor:
cursor.execute("BEGIN;")
hash = self.add_file(filepath, cursor)
qry = """
INSERT INTO final_lines (sequence, line, remarks, meta)
VALUES (%s, %s, '', %s)
ON CONFLICT (sequence) DO UPDATE SET
line = EXCLUDED.line,
meta = EXCLUDED.meta;
INSERT INTO final_lines (sequence, line, remarks)
VALUES (%s, %s, '')
ON CONFLICT DO NOTHING;
"""
cursor.execute(qry, (fileinfo["sequence"], fileinfo["line"], json.dumps(fileinfo["meta"])))
qry = """
UPDATE raw_lines
SET meta = meta || %s
WHERE sequence = %s;
"""
cursor.execute(qry, (json.dumps(fileinfo["meta"]), fileinfo["sequence"]))
cursor.execute(qry, (fileinfo["sequence"], fileinfo["line"]))
qry = """
INSERT INTO final_lines_files (sequence, hash)
@@ -579,8 +472,6 @@ class Datastore:
if filedata is not None:
self.save_file_data(filepath, json.dumps(filedata), cursor)
cursor.execute("CALL final_line_post_import(%s);", (fileinfo["sequence"],))
self.maybe_commit()
def save_raw_smsrc (self, records, fileinfo, filepath, filedata = None):
@@ -615,7 +506,7 @@ class Datastore:
qry = """
UPDATE raw_shots
SET meta = jsonb_set(meta, '{smsrc}', %s::jsonb, true) - 'qc'
SET meta = jsonb_set(meta, '{smsrc}', %s::jsonb, true)
WHERE sequence = %s AND point = %s;
"""
@@ -661,68 +552,7 @@ class Datastore:
# We do not commit if we've been passed a cursor, instead
# we assume that we are in the middle of a transaction
def get_file_data(self, path, cursor = None):
"""
Retrieve arbitrary data associated with a file.
"""
if cursor is None:
cur = self.conn.cursor()
else:
cur = cursor
realpath = configuration.translate_path(path)
hash = file_hash(realpath)
qry = """
SELECT data
FROM file_data
WHERE hash = %s;
"""
cur.execute(qry, (hash,))
res = cur.fetchone()
if cursor is None:
self.maybe_commit()
# We do not commit if we've been passed a cursor, instead
# we assume that we are in the middle of a transaction
return res[0]
def surveys (self, include_archived = False):
"""
Return list of survey definitions.
"""
if self.conn is None:
self.connect()
if include_archived:
qry = """
SELECT meta, schema
FROM public.projects;
"""
else:
qry = """
SELECT meta, schema
FROM public.projects
WHERE NOT (meta->'archived')::boolean IS true
"""
with self.conn:
with self.conn.cursor() as cursor:
cursor.execute(qry)
results = cursor.fetchall()
surveys = []
for r in results:
if r[0]:
r[0]['schema'] = r[1]
surveys.append(r[0])
return surveys
# TODO Does this need tweaking on account of #246?
def apply_survey_configuration(self, cursor = None):
if cursor is None:
cur = self.conn.cursor()
@@ -801,73 +631,3 @@ class Datastore:
self.maybe_commit()
# We do not commit if we've been passed a cursor, instead
# we assume that we are in the middle of a transaction
def del_sequence_final(self, sequence, cursor = None):
"""
Remove final data for a sequence.
"""
if cursor is None:
cur = self.conn.cursor()
else:
cur = cursor
qry = "DELETE FROM files WHERE hash = (SELECT hash FROM final_lines_files WHERE sequence = %s);"
cur.execute(qry, (sequence,))
if cursor is None:
self.maybe_commit()
# We do not commit if we've been passed a cursor, instead
# we assume that we are in the middle of a transaction
def adjust_planner(self, cursor = None):
"""
Adjust estimated times on the planner
"""
if cursor is None:
cur = self.conn.cursor()
else:
cur = cursor
qry = "CALL adjust_planner();"
cur.execute(qry)
if cursor is None:
self.maybe_commit()
# We do not commit if we've been passed a cursor, instead
# we assume that we are in the middle of a transaction
def housekeep_event_log(self, cursor = None):
"""
Call housekeeping actions on the event log
"""
if cursor is None:
cur = self.conn.cursor()
else:
cur = cursor
qry = "CALL augment_event_data();"
cur.execute(qry)
qry = "CALL scan_placeholders();"
cur.execute(qry)
if cursor is None:
self.maybe_commit()
# We do not commit if we've been passed a cursor, instead
# we assume that we are in the middle of a transaction
def run_daily_tasks(self, cursor = None):
"""
Run once-a-day tasks
"""
if cursor is None:
cur = self.conn.cursor()
else:
cur = cursor
qry = "CALL log_midnight_shots();"
cur.execute(qry)
if cursor is None:
self.maybe_commit()
# We do not commit if we've been passed a cursor, instead
# we assume that we are in the middle of a transaction

View File

@@ -1,163 +0,0 @@
#!/usr/bin/python3
"""
Delimited record importing functions.
"""
import csv
import builtins
def to_bool (v):
try:
return bool(int(v))
except ValueError:
if type(v) == str:
return v.strip().lower().startswith("t")
return False
transform = {
"int": lambda v: builtins.int(float(v)),
"float": float,
"string": str,
"bool": to_bool
}
def cast_values (row, fields):
def enum_for (key):
field = fields.get(key, {})
def enum (val):
if "enum" in field:
ret_val = field.get("default", val)
enums = field.get("enum", [])
for enum_key in enums:
if enum_key == val:
ret_val = enums[enum_key]
return ret_val
return val
return enum
# Get rid of any unwanted data
if None in row:
del(row[None])
for key in row:
val = row[key]
enum = enum_for(key)
transformer = transform.get(fields.get(key, {}).get("type"), str)
if type(val) == list:
for i, v in enumerate(val):
row[key][i] = transformer(enum(v))
elif type(val) == dict:
continue
else:
row[key] = transformer(enum(val))
return row
def build_fieldnames (spec): #(arr, key, val):
fieldnames = []
if "fields" in spec:
for key in spec["fields"]:
index = spec["fields"][key]["column"]
try:
fieldnames[index] = key
except IndexError:
assert index >= 0
fieldnames.extend(((index + 1) - len(fieldnames)) * [None])
fieldnames[index] = key
return fieldnames
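# Hypothetical example: build_fieldnames({"fields": {"line": {"column": 2}}})
# returns [None, None, "line"]; the column index becomes the list position.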
def from_file_delimited (path, spec):
fieldnames = build_fieldnames(spec)
fields = spec.get("fields", [])
delimiter = spec.get("delimiter", ",")
firstRow = spec.get("firstRow", 0)
headerRow = spec.get("headerRow", False)
if headerRow:
firstRow += 1
records = []
with open(path, "r", errors="ignore") as fd:
if spec.get("type") == "x-sl+csv":
fieldnames = None # Pick from header row
firstRow = 0
reader = csv.DictReader(fd, delimiter=delimiter)
else:
reader = csv.DictReader(fd, fieldnames=fieldnames, delimiter=delimiter)
row = 0
for line in reader:
skip = False
if row < firstRow:
skip = True
if not skip:
records.append(cast_values(dict(line), fields))
row += 1
return records
def remap (line, headers):
row = dict()
for i, key in enumerate(headers):
if "." in key[1:-1]:
# This is an object
k, attr = key.split(".")
if not k in row:
row[k] = dict()
row[k][attr] = line[i]
elif key in row:
if type(row[key]) == list:
row[key].append(line[i])
else:
row[key] = [ row[key], line[i] ]
else:
row[key] = line[i]
return row
def from_file_saillines (path, spec):
fields = {
"sail_line": { "type": "int" },
"source_line": { "type": "int" },
"incr": { "type": "bool" },
"ntba": { "type": "bool" }
}
# fields = spec.get("fields", sl_fields)
delimiter = spec.get("delimiter", ",")
firstRow = spec.get("firstRow", 0)
records = []
with open(path, "r", errors="ignore") as fd:
row = 0
reader = csv.reader(fd, delimiter=delimiter)
while row < firstRow:
next(reader)
row += 1
headers = [ h.strip() for h in next(reader) if len(h.strip()) ]
for line in reader:
records.append(cast_values(remap(line, headers), fields))
return records
def from_file_p111 (path, spec):
pass
def from_file (path, spec):
if spec.get("type") == "x-sl+csv":
return from_file_saillines(path, spec)
else:
return from_file_delimited(path, spec)

View File

@@ -1,128 +0,0 @@
#!/usr/bin/python3
"""
Fixed width record importing functions.
"""
import builtins
def to_bool (v):
try:
return bool(int(v))
except ValueError:
if type(v) == str:
return v.strip().lower().startswith("t")
return False
transform = {
"int": lambda v: builtins.int(float(v)),
"float": float,
"string": str,
"str": str,
"bool": to_bool
}
def parse_line (line, fields, fixed = None):
# print("parse_line", line, fields, fixed)
data = dict()
if fixed:
for value in fixed:
start = value["offset"]
end = start + len(value["text"])
text = line[start:end]
if text != value["text"]:
return f"Expected text `{value['text']}` at position {start} but found `{text}` instead."
for key in fields:
spec = fields[key]
transformer = transform[spec.get("type", "str")]
pos_from = spec["offset"]
pos_to = pos_from + spec["length"]
text = line[pos_from:pos_to]
value = transformer(text)
if "enum" in spec:
if "default" in spec:
value = spec["default"]
for enum_key in spec["enum"]:
if enum_key == text:
enum_value = transformer(spec["enum"][enum_key])
value = enum_value
break
data[key] = value
# print("parse_line data =", data)
return data
specfields = {
"sps1": {
"line_name": { "offset": 1, "length": 16, "type": "int" },
"point_number": { "offset": 17, "length": 8, "type": "int" },
"easting": { "offset": 46, "length": 9, "type": "float" },
"northing": { "offset": 55, "length": 10, "type": "float" }
},
"sps21": {
"line_name": { "offset": 1, "length": 7, "type": "int" },
"point_number": { "offset": 11, "length": 7, "type": "int" },
"easting": { "offset": 46, "length": 9, "type": "float" },
"northing": { "offset": 55, "length": 10, "type": "float" }
},
"p190": {
"line_name": { "offset": 1, "length": 12, "type": "int" },
"point_number": { "offset": 19, "length": 6, "type": "int" },
"easting": { "offset": 46, "length": 9, "type": "float" },
"northing": { "offset": 55, "length": 9, "type": "float" }
},
}
def from_file(path, spec):
# If spec.fields is not present, deduce it from spec.type ("sps1", "sps21", "p190", etc.)
if "fields" in spec:
fields = spec["fields"]
elif "type" in spec and spec["type"] in specfields:
fields = specfields[spec["type"]]
else:
# TODO: Should default to looking for spec.format and doing a legacy import on it
return "Neither 'type' nor 'fields' given. I don't know how to import this fixed-width dataset."
firstRow = spec.get("firstRow", 0)
skipStart = [] # Skip lines starting with any of these values
skipMatch = [] # Skip lines matching any of these values
if "type" in spec:
if spec["type"] == "sps1" or spec["type"] == "sps21" or spec["type"] == "p190":
skipStart = "H"
skipMatch = "EOF"
records = []
with open(path, "r", errors="ignore") as fd:
row = 0
line = fd.readline()
while line:
skip = False
if row < firstRow:
skip = True
if not skip:
for v in skipStart:
if line.startswith(v):
skip = True
break
for v in skipMatch:
if line == v:
skip = True
break
if not skip:
records.append(parse_line(line, fields))
row += 1
line = fd.readline()
return records
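# Minimal usage sketch (assumes bin/ is on the import path; offsets invented):
# import fwr
# fwr.parse_line("2045 E", {"point": {"offset": 0, "length": 4, "type": "int"}})
# returns {"point": 2045}; a mismatch against a "fixed" anchor would instead
# return an error string, as parse_line() above shows.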

View File

@@ -1,26 +0,0 @@
#!/usr/bin/python3
"""
Do housekeeping actions on the database.
"""
import configuration
from datastore import Datastore
if __name__ == '__main__':
print("Connecting to database")
db = Datastore()
surveys = db.surveys()
print("Reading surveys")
for survey in surveys:
print(f'Survey: {survey["id"]} ({survey["schema"]})')
db.set_survey(survey["schema"])
print("Planner adjustment")
db.adjust_planner()
print("Event log housekeeping")
db.housekeep_event_log()
print("Done")

View File

@@ -59,7 +59,7 @@ def qc_data (cursor, prefix):
else:
print("No QC data found");
return
#print("QC", qc)
index = 0
for item in qc["results"]:

View File

@@ -39,7 +39,7 @@ def seis_data (survey):
if not pathlib.Path(pathPrefix).exists():
print(pathPrefix)
raise ValueError("Export path does not exist")
print(f"Requesting sequences for {survey['id']}")
url = f"http://localhost:3000/api/project/{survey['id']}/sequence"
r = requests.get(url)
@@ -47,12 +47,12 @@ def seis_data (survey):
for sequence in r.json():
if sequence['status'] not in ["final", "ntbp"]:
continue
filename = pathlib.Path(pathPrefix, "sequence{:0>3d}.json".format(sequence['sequence']))
if filename.exists():
print(f"Skipping export for sequence {sequence['sequence']} file already exists")
continue
print(f"Processing sequence {sequence['sequence']}")
url = f"http://localhost:3000/api/project/{survey['id']}/event?sequence={sequence['sequence']}&missing=t"
headers = { "Accept": "application/vnd.seis+json" }

View File

@@ -12,51 +12,18 @@ import os
import sys
import pathlib
import re
import time
import configuration
import p111
import fwr
from datastore import Datastore
def add_pending_remark(db, sequence):
text = '<!-- @@DGL:PENDING@@ --><h4 style="color:red;cursor:help;" title="Edit the sequence file or directory name to import final data">Marked as <code>PENDING</code>.</h4><!-- @@/DGL:PENDING@@ -->\n'
with db.conn.cursor() as cursor:
qry = "SELECT remarks FROM raw_lines WHERE sequence = %s;"
cursor.execute(qry, (sequence,))
remarks = cursor.fetchone()[0]
rx = re.compile("^(<!-- @@DGL:PENDING@@ -->.*<!-- @@/DGL:PENDING@@ -->\n)")
m = rx.match(remarks)
if m is None:
remarks = text + remarks
qry = "UPDATE raw_lines SET remarks = %s WHERE sequence = %s;"
cursor.execute(qry, (remarks, sequence))
db.maybe_commit()
def del_pending_remark(db, sequence):
with db.conn.cursor() as cursor:
qry = "SELECT remarks FROM raw_lines WHERE sequence = %s;"
cursor.execute(qry, (sequence,))
row = cursor.fetchone()
if row is not None:
remarks = row[0]
rx = re.compile("^(<!-- @@DGL:PENDING@@ -->.*<!-- @@/DGL:PENDING@@ -->\n)")
m = rx.match(remarks)
if m is not None:
remarks = rx.sub("",remarks)
qry = "UPDATE raw_lines SET remarks = %s WHERE sequence = %s;"
cursor.execute(qry, (remarks, sequence))
db.maybe_commit()
if __name__ == '__main__':
print("Reading configuration")
file_min_age = configuration.read().get('imports', {}).get('file_min_age', 10)
surveys = configuration.surveys()
print("Connecting to database")
db = Datastore()
surveys = db.surveys()
db.connect()
print("Reading surveys")
for survey in surveys:
@@ -70,100 +37,38 @@ if __name__ == '__main__':
print("No final P1/11 configuration")
exit(0)
lineNameInfo = final_p111.get("lineNameInfo")
pattern = final_p111.get("pattern")
if not lineNameInfo:
if not pattern:
print("ERROR! Missing final.p111.lineNameInfo in project configuration. Cannot import final P111")
raise Exception("Missing final.p111.lineNameInfo")
else:
print("WARNING! No `lineNameInfo` in project configuration (final.p111). You should add it to the settings.")
rx = None
if pattern and pattern.get("regex"):
rx = re.compile(pattern["regex"])
if "pending" in survey["final"]:
pendingRx = re.compile(survey["final"]["pending"]["pattern"]["regex"])
pattern = final_p111["pattern"]
rx = re.compile(pattern["regex"])
for fileprefix in final_p111["paths"]:
realprefix = configuration.translate_path(fileprefix)
print(f"Path prefix: {fileprefix}{realprefix}")
print(f"Path prefix: {fileprefix}")
for globspec in final_p111["globs"]:
for physical_filepath in pathlib.Path(realprefix).glob(globspec):
physical_filepath = str(physical_filepath)
logical_filepath = configuration.untranslate_path(physical_filepath)
print(f"Found {logical_filepath}")
pending = False
if pendingRx:
pending = pendingRx.search(physical_filepath) is not None
if not db.file_in_db(logical_filepath):
age = time.time() - os.path.getmtime(physical_filepath)
if age < file_min_age:
print("Skipping file because too new", logical_filepath)
continue
for filepath in pathlib.Path(fileprefix).glob(globspec):
filepath = str(filepath)
print(f"Found {filepath}")
if not db.file_in_db(filepath):
print("Importing")
if rx:
match = rx.match(os.path.basename(logical_filepath))
if not match:
error_message = f"File path not match the expected format! ({logical_filepath} ~ {pattern['regex']})"
print(error_message, file=sys.stderr)
print("This file will be ignored!")
continue
file_info = dict(zip(pattern["captures"], match.groups()))
file_info["meta"] = {}
if lineNameInfo:
basename = os.path.basename(physical_filepath)
fields = lineNameInfo.get("fields", {})
fixed = lineNameInfo.get("fixed")
try:
parsed_line = fwr.parse_line(basename, fields, fixed)
except ValueError as err:
parsed_line = "Line format error: " + str(err)
if type(parsed_line) == str:
print(parsed_line, file=sys.stderr)
print("This file will be ignored!")
continue
file_info = {}
file_info["sequence"] = parsed_line["sequence"]
file_info["line"] = parsed_line["line"]
del(parsed_line["sequence"])
del(parsed_line["line"])
file_info["meta"] = {
"fileInfo": parsed_line
}
if pending:
print("Skipping / removing final file because marked as PENDING", logical_filepath)
db.del_sequence_final(file_info["sequence"])
add_pending_remark(db, file_info["sequence"])
match = rx.match(os.path.basename(filepath))
if not match:
error_message = f"File path not match the expected format! ({filepath} ~ {pattern['regex']})"
print(error_message, file=sys.stderr)
print("This file will be ignored!")
continue
else:
del_pending_remark(db, file_info["sequence"])
p111_data = p111.from_file(physical_filepath)
file_info = dict(zip(pattern["captures"], match.groups()))
p111_data = p111.from_file(filepath)
print("Saving")
p111_records = p111.p111_type("S", p111_data)
file_info["meta"]["lineName"] = p111.line_name(p111_data)
db.save_final_p111(p111_records, file_info, logical_filepath, survey["epsg"])
db.save_final_p111(p111_records, file_info, filepath, survey["epsg"])
else:
print("Already in DB")
if pending:
print("Removing from database because marked as PENDING")
db.del_sequence_final(file_info["sequence"])
add_pending_remark(db, file_info["sequence"])
print("Done")

View File

@@ -12,7 +12,6 @@ import os
import sys
import pathlib
import re
import time
import configuration
import p190
from datastore import Datastore
@@ -21,7 +20,6 @@ if __name__ == '__main__':
print("Reading configuration")
surveys = configuration.surveys()
file_min_age = configuration.read().get('imports', {}).get('file_min_age', 10)
print("Connecting to database")
db = Datastore()
@@ -51,12 +49,6 @@ if __name__ == '__main__':
print(f"Found {filepath}")
if not db.file_in_db(filepath):
age = time.time() - os.path.getmtime(filepath)
if age < file_min_age:
print("Skipping file because too new", filepath)
continue
print("Importing")
match = rx.match(os.path.basename(filepath))

View File

@@ -1,127 +0,0 @@
#!/usr/bin/python3
"""
Import SmartSource data.
For each survey in configuration.surveys(), check for new
or modified final gun header files and (re-)import them into the
database.
"""
import os
import sys
import pathlib
import re
import time
import json
import configuration
from datastore import Datastore
if __name__ == '__main__':
"""
Imports map layers from the directories defined in the configuration object
`imports.map.layers`. The content of that key is an object with the following
structure:
{
    layer1Name: [
        {
            format: "geojson",
            path: "",            // Logical path to a directory
            globs: [
                "**/*.geojson"   // List of globs matching map data files
            ]
        },
        …
    ],
    layer2Name: …
}
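For example (layer name and path invented), the importer below reads it as:
    survey["imports"]["map"]["layers"] == {
        "obstructions": [
            { "format": "geojson", "path": "/gis/obstructions",
              "globs": ["**/*.geojson"] }
        ]
    }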
"""
def process (layer_name, layer, physical_filepath):
physical_filepath = str(physical_filepath)
logical_filepath = configuration.untranslate_path(physical_filepath)
print(f"Found {logical_filepath}")
if not db.file_in_db(logical_filepath):
age = time.time() - os.path.getmtime(physical_filepath)
if age < file_min_age:
print("Skipping file because too new", logical_filepath)
return
print("Importing")
file_info = {
"type": "map_layer",
"format": layer["format"],
"name": layer_name,
"tooltip": layer.get("tooltip"),
"popup": layer.get("popup")
}
db.save_file_data(logical_filepath, json.dumps(file_info))
else:
file_info = db.get_file_data(logical_filepath)
dirty = False
if file_info:
if file_info["name"] != layer_name:
print("Renaming to", layer_name)
file_info["name"] = layer_name
dirty = True
if file_info.get("tooltip") != layer.get("tooltip"):
print("Changing tooltip to", layer.get("tooltip") or "null")
file_info["tooltip"] = layer.get("tooltip")
dirty = True
if file_info.get("popup") != layer.get("popup"):
print("Changing popup to", layer.get("popup") or "null")
file_info["popup"] = layer.get("popup")
dirty = True
if dirty:
db.save_file_data(logical_filepath, json.dumps(file_info))
else:
print("Already in DB")
print("Reading configuration")
file_min_age = configuration.read().get('imports', {}).get('file_min_age', 10)
print("Connecting to database")
db = Datastore()
surveys = db.surveys()
print("Reading surveys")
for survey in surveys:
print(f'Survey: {survey["id"]} ({survey["schema"]})')
db.set_survey(survey["schema"])
try:
map_layers = survey["imports"]["map"]["layers"]
except KeyError:
print("No map layers defined")
continue
for layer_name, layer_items in map_layers.items():
for layer in layer_items:
fileprefix = layer["path"]
realprefix = configuration.translate_path(fileprefix)
if os.path.isfile(realprefix):
process(layer_name, layer, realprefix)
elif os.path.isdir(realprefix):
if not "globs" in layer:
layer["globs"] = [ "**/*.geojson" ]
for globspec in layer["globs"]:
for physical_filepath in pathlib.Path(realprefix).glob(globspec):
process(layer_name, layer, physical_filepath)
print("Done")

View File

@@ -8,59 +8,36 @@ or modified preplots and (re-)import them into the database.
"""
from glob import glob
import os
import sys
import time
import configuration
import preplots
from datastore import Datastore
def preplots_sorter (preplot):
rank = {
"x-sl+csv": 10
}
return rank.get(preplot.get("type"), 0)
if __name__ == '__main__':
print("Reading configuration")
surveys = configuration.surveys()
print("Connecting to database")
db = Datastore()
surveys = db.surveys()
print("Reading configuration")
file_min_age = configuration.read().get('imports', {}).get('file_min_age', 10)
print("Reading surveys")
for survey in surveys:
print(f'Survey: {survey["id"]} ({survey["schema"]})')
db.set_survey(survey["schema"])
# We sort the preplots so that ancillary line info always comes last,
# after the actual line + point data has been imported
for file in sorted(survey["preplots"], key=preplots_sorter):
realpath = configuration.translate_path(file["path"])
for file in survey["preplots"]:
print(f"Preplot: {file['path']}")
if not db.file_in_db(file["path"]):
age = time.time() - os.path.getmtime(realpath)
if age < file_min_age:
print("Skipping file because too new", file["path"])
continue
print("Importing")
try:
preplot = preplots.from_file(file, realpath)
preplot = preplots.from_file(file)
except FileNotFoundError:
print(f"File does not exist: {file['path']}", file=sys.stderr)
continue
if type(preplot) is list:
print("Saving to DB")
if file.get("type") == "x-sl+csv":
db.save_preplot_line_info(preplot, file["path"], file)
else:
db.save_preplots(preplot, file["path"], file["class"], survey["epsg"], file)
db.save_preplots(preplot, file["path"], file["class"], survey["epsg"], file)
elif type(preplot) is str:
print(preplot)
else:

View File

@@ -12,20 +12,18 @@ import os
import sys
import pathlib
import re
import time
import configuration
import p111
import fwr
from datastore import Datastore
if __name__ == '__main__':
print("Reading configuration")
file_min_age = configuration.read().get('imports', {}).get('file_min_age', 10)
surveys = configuration.surveys()
print("Connecting to database")
db = Datastore()
surveys = db.surveys()
db.connect()
print("Reading surveys")
for survey in surveys:
@@ -39,95 +37,50 @@ if __name__ == '__main__':
print("No raw P1/11 configuration")
exit(0)
lineNameInfo = raw_p111.get("lineNameInfo")
pattern = raw_p111.get("pattern")
if not lineNameInfo:
if not pattern:
print("ERROR! Missing raw.p111.lineNameInfo in project configuration. Cannot import raw P111")
raise Exception("Missing raw.p111.lineNameInfo")
else:
print("WARNING! No `lineNameInfo` in project configuration (raw.p111). You should add it to the settings.")
rx = None
if pattern and pattern.get("regex"):
rx = re.compile(pattern["regex"])
pattern = raw_p111["pattern"]
rx = re.compile(pattern["regex"])
if "ntbp" in survey["raw"]:
ntbpRx = re.compile(survey["raw"]["ntbp"]["pattern"]["regex"])
for fileprefix in raw_p111["paths"]:
realprefix = configuration.translate_path(fileprefix)
print(f"Path prefix: {fileprefix}{realprefix}")
print(f"Path prefix: {fileprefix}")
for globspec in raw_p111["globs"]:
for physical_filepath in pathlib.Path(realprefix).glob(globspec):
physical_filepath = str(physical_filepath)
logical_filepath = configuration.untranslate_path(physical_filepath)
print(f"Found {logical_filepath}")
for filepath in pathlib.Path(fileprefix).glob(globspec):
filepath = str(filepath)
print(f"Found {filepath}")
if ntbpRx:
ntbp = ntbpRx.search(physical_filepath) is not None
ntbp = ntbpRx.search(filepath) is not None
else:
ntbp = False
if not db.file_in_db(logical_filepath):
age = time.time() - os.path.getmtime(physical_filepath)
if age < file_min_age:
print("Skipping file because too new", logical_filepath)
continue
if not db.file_in_db(filepath):
print("Importing")
if rx:
match = rx.match(os.path.basename(logical_filepath))
if not match:
error_message = f"File path not matching the expected format! ({logical_filepath} ~ {pattern['regex']})"
print(error_message, file=sys.stderr)
print("This file will be ignored!")
continue
match = rx.match(os.path.basename(filepath))
if not match:
error_message = f"File path not match the expected format! ({filepath} ~ {pattern['regex']})"
print(error_message, file=sys.stderr)
print("This file will be ignored!")
continue
file_info = dict(zip(pattern["captures"], match.groups()))
file_info["meta"] = {}
file_info = dict(zip(pattern["captures"], match.groups()))
if lineNameInfo:
basename = os.path.basename(physical_filepath)
fields = lineNameInfo.get("fields", {})
fixed = lineNameInfo.get("fixed")
try:
parsed_line = fwr.parse_line(basename, fields, fixed)
except ValueError as err:
parsed_line = "Line format error: " + str(err)
if type(parsed_line) == str:
print(parsed_line, file=sys.stderr)
print("This file will be ignored!")
continue
file_info = {}
file_info["sequence"] = parsed_line["sequence"]
file_info["line"] = parsed_line["line"]
del(parsed_line["sequence"])
del(parsed_line["line"])
file_info["meta"] = {
"fileInfo": parsed_line
}
p111_data = p111.from_file(physical_filepath)
p111_data = p111.from_file(filepath)
print("Saving")
p111_records = p111.p111_type("S", p111_data)
if len(p111_records):
file_info["meta"]["lineName"] = p111.line_name(p111_data)
db.save_raw_p111(p111_records, file_info, logical_filepath, survey["epsg"], ntbp=ntbp)
else:
print("No source records found in file")
db.save_raw_p111(p111_records, file_info, filepath, survey["epsg"], ntbp=ntbp)
else:
print("Already in DB")
# Update the NTBP status to whatever the latest is,
# as it might have changed.
db.set_ntbp(logical_filepath, ntbp)
db.set_ntbp(filepath, ntbp)
if ntbp:
print("Sequence is NTBP")

View File

@@ -12,7 +12,6 @@ import os
import sys
import pathlib
import re
import time
import configuration
import p190
from datastore import Datastore
@@ -21,7 +20,6 @@ if __name__ == '__main__':
print("Reading configuration")
surveys = configuration.surveys()
file_min_age = configuration.read().get('imports', {}).get('file_min_age', 10)
print("Connecting to database")
db = Datastore()
@@ -54,12 +52,6 @@ if __name__ == '__main__':
print(f"Found {filepath}")
if not db.file_in_db(filepath):
age = time.time() - os.path.getmtime(filepath)
if age < file_min_age:
print("Skipping file because too new", filepath)
continue
print("Importing")
match = rx.match(os.path.basename(filepath))

View File

@@ -12,20 +12,18 @@ import os
import sys
import pathlib
import re
import time
import configuration
import smsrc
import fwr
from datastore import Datastore
if __name__ == '__main__':
print("Reading configuration")
file_min_age = configuration.read().get('imports', {}).get('file_min_age', 10)
surveys = configuration.surveys()
print("Connecting to database")
db = Datastore()
surveys = db.surveys()
db.connect()
print("Reading surveys")
for survey in surveys:
@@ -34,80 +32,43 @@ if __name__ == '__main__':
db.set_survey(survey["schema"])
try:
raw_smsrc = survey["raw"]["source"]["smsrc"]["header"]
raw_smsrc = survey["raw"]["smsrc"]
except KeyError:
print("No SmartSource data configuration")
continue
# NOTE I've no idea what this is 🤔
# flags = 0
# if "flags" in raw_smsrc:
# configuration.rxflags(raw_smsrc["flags"])
flags = 0
if "flags" in raw_smsrc:
configuration.rxflags(raw_smsrc["flags"])
lineNameInfo = raw_smsrc.get("lineNameInfo")
pattern = raw_smsrc.get("pattern")
rx = None
if pattern and pattern.get("regex"):
rx = re.compile(pattern["regex"])
pattern = raw_smsrc["pattern"]
rx = re.compile(pattern["regex"], flags)
for fileprefix in raw_smsrc["paths"]:
realprefix = configuration.translate_path(fileprefix)
print(f"Path prefix: {fileprefix}{realprefix}")
print(f"Path prefix: {fileprefix}")
for globspec in raw_smsrc["globs"]:
for physical_filepath in pathlib.Path(realprefix).glob(globspec):
physical_filepath = str(physical_filepath)
logical_filepath = configuration.untranslate_path(physical_filepath)
print(f"Found {logical_filepath}")
if not db.file_in_db(logical_filepath):
age = time.time() - os.path.getmtime(physical_filepath)
if age < file_min_age:
print("Skipping file because too new", logical_filepath)
continue
for filepath in pathlib.Path(fileprefix).glob(globspec):
filepath = str(filepath)
print(f"Found {filepath}")
if not db.file_in_db(filepath):
print("Importing")
if rx:
match = rx.match(os.path.basename(logical_filepath))
if not match:
error_message = f"File path not matching the expected format! ({logical_filepath} ~ {pattern['regex']})"
print(error_message, file=sys.stderr)
print("This file will be ignored!")
continue
match = rx.match(os.path.basename(filepath))
if not match:
error_message = f"File path not matching the expected format! ({filepath} ~ {pattern['regex']})"
print(error_message, file=sys.stderr)
print("This file will be ignored!")
continue
file_info = dict(zip(pattern["captures"], match.groups()))
file_info["meta"] = {}
file_info = dict(zip(pattern["captures"], match.groups()))
if lineNameInfo:
basename = os.path.basename(physical_filepath)
fields = lineNameInfo.get("fields", {})
fixed = lineNameInfo.get("fixed")
try:
parsed_line = fwr.parse_line(basename, fields, fixed)
except ValueError as err:
parsed_line = "Line format error: " + str(err)
if type(parsed_line) == str:
print(parsed_line, file=sys.stderr)
print("This file will be ignored!")
continue
file_info = {}
file_info["sequence"] = parsed_line["sequence"]
file_info["line"] = parsed_line["line"]
del(parsed_line["sequence"])
del(parsed_line["line"])
file_info["meta"] = {
"fileInfo": parsed_line
}
smsrc_records = smsrc.from_file(physical_filepath)
smsrc_records = smsrc.from_file(filepath)
print("Saving")
db.save_raw_smsrc(smsrc_records, file_info, logical_filepath)
db.save_raw_smsrc(smsrc_records, file_info, filepath)
else:
print("Already in DB")

View File

@@ -15,4 +15,25 @@ from datastore import Datastore
if __name__ == '__main__':
print("This function is obsolete. Returning with no action")
print("Reading configuration")
configs = configuration.files(include_archived = True)
print("Connecting to database")
db = Datastore()
#db.connect()
print("Reading surveys")
for config in configs:
filepath = config[0]
survey = config[1]
print(f'Survey: {survey["id"]} ({filepath})')
db.set_survey(survey["schema"])
if not db.file_in_db(filepath):
print("Saving to DB")
db.save_file_data(filepath, json.dumps(survey))
print("Applying survey configuration")
db.apply_survey_configuration()
else:
print("Already in DB")
print("Done")

View File

@@ -1,48 +0,0 @@
#!/usr/bin/python3
from datetime import datetime
from datastore import Datastore
def detect_schema (conn):
with conn.cursor() as cursor:
qry = "SELECT meta->>'_schema' AS schema, tstamp, age(current_timestamp, tstamp) age FROM real_time_inputs WHERE meta ? '_schema' AND age(current_timestamp, tstamp) < '02:00:00' ORDER BY tstamp DESC LIMIT 1"
cursor.execute(qry)
res = cursor.fetchone()
if res and len(res):
return res[0]
return None
if __name__ == '__main__':
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--schema", required=False, default=None, help="survey where to insert the event")
ap.add_argument("-t", "--tstamp", required=False, default=None, help="event timestamp")
ap.add_argument("-l", "--label", required=False, default=None, action="append", help="event label")
ap.add_argument('remarks', type=str, nargs="+", help="event message")
args = vars(ap.parse_args())
db = Datastore()
db.connect()
if args["schema"]:
schema = args["schema"]
else:
schema = detect_schema(db.conn)
if args["tstamp"]:
tstamp = args["tstamp"]
else:
tstamp = datetime.utcnow().isoformat()
message = " ".join(args["remarks"])
print("new event:", schema, tstamp, message, args["label"])
if schema and tstamp and message:
db.set_survey(schema)
with db.conn.cursor() as cursor:
qry = "INSERT INTO event_log (tstamp, remarks, labels) VALUES (%s, replace_placeholders(%s, %s, NULL, NULL), %s);"
cursor.execute(qry, (tstamp, message, tstamp, args["label"]))
db.maybe_commit()

View File

@@ -7,6 +7,7 @@ P1/11 parsing functions.
import math
import re
from datetime import datetime, timedelta, timezone
from parse_fwr import parse_fwr
def _int (string):
return int(float(string))
@@ -152,9 +153,6 @@ def parse_line (string):
return None
def line_name(records):
return set([ r['Acquisition Line Name'] for r in p111_type("S", records) ]).pop()
def p111_type(type, records):
return [ r for r in records if r["type"] == type ]

View File

@@ -12,7 +12,7 @@ from parse_fwr import parse_fwr
def parse_p190_header (string):
"""Parse a generic P1/90 header record.
Returns a dictionary of fields.
"""
names = [ "record_type", "header_type", "header_type_modifier", "description", "data" ]
@@ -27,7 +27,7 @@ def parse_p190_type1 (string):
"doy", "time", "spare2" ]
record = parse_fwr(string, [1, 12, 3, 1, 1, 1, 6, 10, 11, 9, 9, 6, 3, 6, 1])
return dict(zip(names, record))
def parse_p190_rcv_group (string):
"""Parse a P1/90 Type 1 receiver group record."""
names = [ "record_type",
@@ -37,7 +37,7 @@ def parse_p190_rcv_group (string):
"streamer_id" ]
record = parse_fwr(string, [1, 4, 9, 9, 4, 4, 9, 9, 4, 4, 9, 9, 4, 1])
return dict(zip(names, record))
def parse_line (string):
type = string[0]
if string[:3] == "EOF":
@@ -52,7 +52,7 @@ def parse_line (string):
def p190_type(type, records):
return [ r for r in records if r["record_type"] == type ]
def p190_header(code, records):
return [ h for h in p190_type("H", records) if h["header_type"]+h["header_type_modifier"] == code ]
@@ -86,15 +86,15 @@ def normalise_record(record):
# These are probably strings
elif "strip" in dir(record[key]):
record[key] = record[key].strip()
return record
def normalise(records):
for record in records:
normalise_record(record)
return records
def from_file(path, only_records=None, shot_range=None, with_objrefs=False):
records = []
with open(path) as fd:
@@ -102,10 +102,10 @@ def from_file(path, only_records=None, shot_range=None, with_objrefs=False):
line = fd.readline()
while line:
cnt = cnt + 1
if line == "EOF":
break
record = parse_line(line)
if record is not None:
if only_records:
@@ -121,9 +121,9 @@ def from_file(path, only_records=None, shot_range=None, with_objrefs=False):
records.append(record)
line = fd.readline()
return records
def apply_tstamps(recordset, tstamp=None, fix_bad_seconds=False):
#print("tstamp", tstamp, type(tstamp))
if type(tstamp) is int:
@@ -161,16 +161,16 @@ def apply_tstamps(recordset, tstamp=None, fix_bad_seconds=False):
record["tstamp"] = ts
prev[object_id(record)] = doy
break
return recordset
def dms(value):
# 591544.61N
hemisphere = 1 if value[-1] in "NnEe" else -1
seconds = float(value[-6:-1])
minutes = int(value[-8:-6])
degrees = int(value[:-8])
return (degrees + minutes/60 + seconds/3600) * hemisphere
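# Worked example from the comment above: dms("591544.61N")
# = 59 + 15/60 + 44.61/3600 ≈ 59.262392 (northern hemisphere, so positive)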
def tod(record):
@@ -183,7 +183,7 @@ def tod(record):
m = int(time[2:4])
s = float(time[4:])
return d*86400 + h*3600 + m*60 + s
def duration(record0, record1):
ts0 = tod(record0)
ts1 = tod(record1)
@@ -198,10 +198,10 @@ def azimuth(record0, record1):
x0, y0 = float(record0["easting"]), float(record0["northing"])
x1, y1 = float(record1["easting"]), float(record1["northing"])
return math.degrees(math.atan2(x1-x0, y1-y0)) % 360
def speed(record0, record1, knots=False):
scale = 3600/1852 if knots else 1
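# 1 knot = 1852 m per 3600 s, so multiplying metres/second by 3600/1852 gives knots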
t0 = tod(record0)
t1 = tod(record1)
return (distance(record0, record1) / math.fabs(t1-t0)) * scale

21  bin/parse_fwr.py Normal file
View File

@@ -0,0 +1,21 @@
#!/usr/bin/python3
def parse_fwr (string, widths, start=0):
"""Parse a fixed-width record.
string: the string to parse.
widths: a list of record widths. A negative width denotes a field to be skipped.
start: optional start index.
Returns a list of strings.
"""
results = []
current_index = start
for width in widths:
if width > 0:
results.append(string[current_index : current_index + width])
current_index += width
else:
current_index -= width
return results
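# Minimal usage sketch: parse_fwr("AAABBBCCC", [3, -3, 3]) returns ["AAA", "CCC"];
# the middle field is skipped because its width is negative.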

View File

@@ -1,51 +1,14 @@
import fwr
import delimited
import sps
"""
Preplot importing functions.
"""
def is_fixed_width (file):
fixed_width_types = [ "sps1", "sps21", "p190", "fixed-width" ]
return type(file) == dict and "type" in file and file["type"] in fixed_width_types
def is_delimited (file):
delimited_types = [ "csv", "p111", "x-sl+csv" ]
return type(file) == dict and "type" in file and file["type"] in delimited_types
def from_file (file, realpath = None):
"""
Return a list of dicts, where each dict has the structure:
{
"line_name": <int>,
"points": [
{
"line_name": <int>,
"point_number": <int>,
"easting": <float>,
"northing": <float>
},
]
}
On error, return a string describing the error condition.
"""
filepath = realpath or file["path"]
if is_fixed_width(file):
records = fwr.from_file(filepath, file)
elif is_delimited(file):
records = delimited.from_file(filepath, file)
def from_file (file):
if not "type" in file or file["type"] == "sps":
records = sps.from_file(file["path"], file["format"] if "format" in file else None )
else:
return "Unrecognised file format"
if type(records) == str:
# This is an error message
return records
if file.get("type") == "x-sl+csv":
return records
return "Not an SPS file"
lines = []
line_names = set([r["line_name"] for r in records])
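# Hypothetical return value matching the structure documented in from_file()
# above (line and point values invented):
# [{"line_name": 1001,
#   "points": [{"line_name": 1001, "point_number": 2045,
#               "easting": 491234.5, "northing": 6512345.0}]}]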

View File

@@ -13,27 +13,21 @@ from datastore import Datastore
if __name__ == '__main__':
print("Reading configuration")
surveys = configuration.surveys()
print("Connecting to database")
db = Datastore()
print("Reading configuration")
surveys = db.surveys()
print("Reading surveys")
for survey in surveys:
print(f'Survey: {survey["id"]} ({survey["schema"]})')
db.set_survey(survey["schema"])
for file in db.list_files():
try:
path = configuration.translate_path(file[0])
if not os.path.exists(path):
print(path, "NOT FOUND")
db.del_file(file[0])
except TypeError:
# In case the logical path no longer matches
# the Dougal configuration.
print(file[0], "COULD NOT BE TRANSLATED TO A PHYSICAL PATH. DELETING")
db.del_file(file[0])
path = file[0]
if not os.path.exists(path):
print(path, "NOT FOUND")
db.del_file(path)
print("Done")

View File

@@ -1,8 +1,5 @@
#!/bin/bash
# Maximum runtime in seconds before killing an overdue instance (here 15 minutes)
MAX_RUNTIME_SECONDS=$((15 * 60))
DOUGAL_ROOT=${DOUGAL_ROOT:-$(dirname "$0")/..}
BINDIR="$DOUGAL_ROOT/bin"
@@ -11,20 +8,6 @@ LOCKFILE=${LOCKFILE:-$VARDIR/runner.lock}
[ -f ~/.profile ] && . ~/.profile
DOUGAL_LOG_TAG="dougal.runner[$$]"
# Only send output to the logger if we have the appropriate
# configuration set.
if [[ -n "$DOUGAL_LOG_TAG" && -n "$DOUGAL_LOG_FACILITY" ]]; then
function _logger () {
logger $*
}
else
function _logger () {
: # This is the Bash null command
}
fi
function tstamp () {
date -u +%Y-%m-%dT%H:%M:%SZ
}
@@ -35,44 +18,26 @@ function prefix () {
function print_log () {
printf "$(prefix)\033[36m%s\033[0m\n" "$*"
_logger -t "$DOUGAL_LOG_TAG" -p "$DOUGAL_LOG_FACILITY.info" "$*"
}
function print_info () {
printf "$(prefix)\033[0m%s\n" "$*"
_logger -t "$DOUGAL_LOG_TAG" -p "$DOUGAL_LOG_FACILITY.debug" "$*"
}
function print_warning () {
printf "$(prefix)\033[33;1m%s\033[0m\n" "$*"
_logger -t "$DOUGAL_LOG_TAG" -p "$DOUGAL_LOG_FACILITY.warning" "$*"
}
function print_error () {
printf "$(prefix)\033[31m%s\033[0m\n" "$*"
_logger -t "$DOUGAL_LOG_TAG" -p "$DOUGAL_LOG_FACILITY.error" "$*"
}
function run () {
PROGNAME=${PROGNAME:-$(basename "$1")}
PROGNAME=$(basename "$1")
STDOUTLOG="$VARDIR/$PROGNAME.out"
STDERRLOG="$VARDIR/$PROGNAME.err"
# What follows runs the command that we have been given (with any arguments passed)
# and logs:
# * stdout to $STDOUTLOG (a temporary file) and possibly to syslog, if enabled.
# * stderr to $STDERRLOG (a temporary file) and possibly to syslog, if enabled.
#
# When logging to syslog, stdout goes as debug level and stderr as warning (not error)
#
# The temporary file is used in case the command fails, at which point we try to log
# a warning in GitLab's alerts facility.
$* \
> >(tee $STDOUTLOG |_logger -t "dougal.runner.$PROGNAME[$$]" -p "$DOUGAL_LOG_FACILITY.debug") \
2> >(tee $STDERRLOG |_logger -t "dougal.runner.$PROGNAME[$$]" -p "$DOUGAL_LOG_FACILITY.warning") || {
"$1" >"$STDOUTLOG" 2>"$STDERRLOG" || {
print_error "Failed: $PROGNAME"
cat $STDOUTLOG
cat $STDERRLOG
@@ -82,55 +47,25 @@ function run () {
# DESCRIPTION=""
SERVICE="deferred_imports"
# Disable GitLab alerts. They're just not very practical
# $BINDIR/send_alert.py -t "$TITLE" -s "$SERVICE" -l "critical" \
# -O "$(cat $STDOUTLOG)" -E "$(cat $STDERRLOG)"
$BINDIR/send_alert.py -t "$TITLE" -s "$SERVICE" -l "critical" \
-O "$(cat $STDOUTLOG)" -E "$(cat $STDERRLOG)"
exit 2
}
# cat $STDOUTLOG
unset PROGNAME
rm $STDOUTLOG $STDERRLOG
}
function cleanup () {
if [[ -f $LOCKFILE ]]; then
rm "$LOCKFILE"
fi
}
if [[ -f $LOCKFILE ]]; then
PID=$(cat "$LOCKFILE")
if kill -0 "$PID" 2>/dev/null; then # Check if process is running
# Get elapsed time in D-HH:MM:SS format and convert to seconds
ELAPSED_STR=$(ps -p "$PID" -o etime= | tr -d '[:space:]')
if [ -n "$ELAPSED_STR" ]; then
# Convert D-HH:MM:SS to seconds
ELAPSED_SECONDS=$(echo "$ELAPSED_STR" | awk -F'[-:]' '{
seconds = 0
if (NF == 4) { seconds += $1 * 86400 } # Days
if (NF >= 3) { seconds += $NF-2 * 3600 } # Hours
if (NF >= 2) { seconds += $NF-1 * 60 } # Minutes
seconds += $NF # Seconds
print seconds
}')
if [ "$ELAPSED_SECONDS" -gt "$MAX_RUNTIME_SECONDS" ]; then
# Kill the overdue process (SIGTERM; use -9 for SIGKILL if needed)
kill "$PID" 2>/dev/null
print_warning $(printf "Killed overdue process (%d) that ran for %s (%d seconds)" "$PID" "$ELAPSED_STR" "$ELAPSED_SECONDS")
rm "$LOCKFILE"
else
print_warning $(printf "Previous process is still running (%d) for %s (%d seconds)" "$PID" "$ELAPSED_STR" "$ELAPSED_SECONDS")
exit 1
fi
else
print_warning $(printf "Could not retrieve elapsed time for process (%d)" "$PID")
exit 1
fi
else
rm "$LOCKFILE"
print_warning $(printf "Previous process (%d) not found. Must have died unexpectedly" "$PID")
fi
PID=$(cat "$LOCKFILE")
if pgrep -F "$LOCKFILE"; then
print_warning $(printf "The previous process is still running (%d)" $PID)
exit 1
else
rm "$LOCKFILE"
print_warning $(printf "Previous process (%d) not found. Must have died unexpectedly" $PID)
fi
fi
echo "$$" > "$LOCKFILE" || {
@@ -139,13 +74,6 @@ echo "$$" > "$LOCKFILE" || {
}
print_info "Start run"
print_log "Check if data is accessible"
$BINDIR/check_mounts_present.py || {
print_warning "Import mounts not accessible. Inhibiting all tasks!"
cleanup
exit 253
}
print_log "Purge deleted files"
run $BINDIR/purge_deleted_files.py
@@ -158,47 +86,33 @@ run $BINDIR/import_preplots.py
print_log "Import raw P1/11"
run $BINDIR/import_raw_p111.py
#print_log "Import raw P1/90"
#run $BINDIR/import_raw_p190.py
print_log "Import raw P1/90"
run $BINDIR/import_raw_p190.py
print_log "Import final P1/11"
run $BINDIR/import_final_p111.py
#print_log "Import final P1/90"
#run $BINDIR/import_final_p190.py
print_log "Import final P1/90"
run $BINDIR/import_final_p190.py
print_log "Import SmartSource data"
run $BINDIR/import_smsrc.py
print_log "Import map user layers"
run $BINDIR/import_map_layers.py
if [[ -z "$RUNNER_NOEXPORT" ]]; then
print_log "Export system data"
run $BINDIR/system_exports.py
fi
# if [[ -z "$RUNNER_NOEXPORT" ]]; then
# print_log "Export system data"
# run $BINDIR/system_exports.py
# fi
if [[ -n "$RUNNER_IMPORT" ]]; then
print_log "Import system data"
run $BINDIR/system_imports.py
fi
# if [[ -n "$RUNNER_IMPORT" ]]; then
# print_log "Import system data"
# run $BINDIR/system_imports.py
# fi
print_log "Export QC data"
run $BINDIR/human_exports_qc.py
# print_log "Export QC data"
# run $BINDIR/human_exports_qc.py
# print_log "Export sequence data"
# run $BINDIR/human_exports_seis.py
print_log "Process ASAQC queue"
# Run insecure in test mode:
# export NODE_TLS_REJECT_UNAUTHORIZED=0
PROGNAME=asaqc_queue run $DOUGAL_ROOT/lib/www/server/queues/asaqc/index.js
print_log "Run database housekeeping actions"
run $BINDIR/housekeep_database.py
print_log "Run QCs"
PROGNAME=run_qc run $DOUGAL_ROOT/lib/www/server/lib/qc/index.js
print_log "Export sequence data"
run $BINDIR/human_exports_seis.py
rm "$LOCKFILE"

51  bin/sps.py Normal file
View File

@@ -0,0 +1,51 @@
#!/usr/bin/python3
"""
SPS importing functions.
And by SPS, we mean more or less any line-delimited, fixed-width record format.
"""
import builtins
from parse_fwr import parse_fwr
def int (v):
return builtins.int(float(v))
def parse_line (string, spec):
"""Parse a line from an SPS file."""
names = spec["names"]
widths = spec["widths"]
normalisers = spec["normalisers"]
record = [ t[0](t[1]) for t in zip(normalisers, parse_fwr(string, widths)) ]
return dict(zip(names, record))
def from_file(path, spec = None):
if spec is None:
spec = {
"names": [ "line_name", "point_number", "easting", "northing" ],
"widths": [ -1, 10, 10, -25, 10, 10 ],
"normalisers": [ int, int, float, float ]
}
else:
normaliser_tokens = [ "int", "float", "str", "bool" ]
spec["normalisers"] = [ eval(t) for t in spec["types"] if t in normaliser_tokens ]
records = []
with open(path) as fd:
cnt = 0
line = fd.readline()
while line:
cnt = cnt+1
if line == "EOF":
break
record = parse_line(line, spec)
if record is not None:
records.append(record)
line = fd.readline()
del spec["normalisers"]
return records
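# Minimal usage sketch (assumes bin/ is on the import path; spec values invented):
# import sps
# spec = {"names": ["line_name", "point_number"], "widths": [4, -1, 3],
#         "normalisers": [sps.int, sps.int]}
# sps.parse_line("1001 204", spec)  # {"line_name": 1001, "point_number": 204}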

View File

@@ -24,7 +24,6 @@ locals().update(configuration.vars())
exportables = {
"public": {
"projects": [ "meta" ],
"info": None,
"real_time_inputs": None
},
"survey": {
@@ -33,13 +32,12 @@ exportables = {
"preplot_lines": [ "remarks", "ntba", "meta" ],
"preplot_points": [ "ntba", "meta" ],
"raw_lines": [ "remarks", "meta" ],
"raw_shots": [ "meta" ],
"planned_lines": None
"raw_shots": [ "meta" ]
}
}
def primary_key (table, cursor):
# https://wiki.postgresql.org/wiki/Retrieve_primary_key_columns
qry = """
SELECT a.attname, format_type(a.atttypid, a.atttypmod) AS data_type
@@ -50,7 +48,7 @@ def primary_key (table, cursor):
WHERE i.indrelid = %s::regclass
AND i.indisprimary;
"""
cursor.execute(qry, (table,))
return cursor.fetchall()

View File

@@ -40,10 +40,6 @@ if __name__ == '__main__':
continue
try:
for table in exportables:
path = os.path.join(pathPrefix, table)
if os.path.exists(path):
cursor.execute(f"DELETE FROM {table};")
for table in exportables:
path = os.path.join(pathPrefix, table)
print("", path, "", table)

View File

@@ -19,7 +19,6 @@ locals().update(configuration.vars())
exportables = {
"public": {
"projects": [ "meta" ],
"info": None,
"real_time_inputs": None
},
"survey": {
@@ -28,13 +27,12 @@ exportables = {
"preplot_lines": [ "remarks", "ntba", "meta" ],
"preplot_points": [ "ntba", "meta" ],
"raw_lines": [ "remarks", "meta" ],
"raw_shots": [ "meta" ],
"planned_lines": None
"raw_shots": [ "meta" ]
}
}
def primary_key (table, cursor):
# https://wiki.postgresql.org/wiki/Retrieve_primary_key_columns
qry = """
SELECT a.attname, format_type(a.atttypid, a.atttypmod) AS data_type
@@ -45,13 +43,13 @@ def primary_key (table, cursor):
WHERE i.indrelid = %s::regclass
AND i.indisprimary;
"""
cursor.execute(qry, (table,))
return cursor.fetchall()
def import_table(fd, table, columns, cursor):
pk = [ r[0] for r in primary_key(table, cursor) ]
# Create temporary table to import into
temptable = "import_"+table
print("Creating temporary table", temptable)
@@ -61,29 +59,29 @@ def import_table(fd, table, columns, cursor):
AS SELECT {', '.join(pk + columns)} FROM {table}
WITH NO DATA;
"""
#print(qry)
cursor.execute(qry)
# Import into the temp table
print("Import data into temporary table")
cursor.copy_from(fd, temptable)
# Update the destination table
print("Updating destination table")
setcols = ", ".join([ f"{c} = t.{c}" for c in columns ])
wherecols = " AND ".join([ f"{table}.{c} = t.{c}" for c in pk ])
qry = f"""
UPDATE {table}
SET {setcols}
FROM {temptable} t
WHERE {wherecols};
"""
#print(qry)
cursor.execute(qry)
if __name__ == '__main__':
@@ -98,21 +96,16 @@ if __name__ == '__main__':
with db.conn.cursor() as cursor:
columns = exportables["public"][table]
path = os.path.join(VARDIR, "-"+table)
try:
with open(path, "rb") as fd:
print(" →→ ", path, " ←← ", table, columns)
if columns is not None:
import_table(fd, table, columns, cursor)
else:
try:
print(f"Copying from {path} into {table}")
cursor.copy_from(fd, table)
except psycopg2.errors.UniqueViolation:
print(f"It looks like table {table} may have already been imported. Skipping it.")
except FileNotFoundError:
print(f"File not found. Skipping {path}")
db.conn.commit()
with open(path, "rb") as fd:
print(" →→ ", path, " ←← ", table, columns)
if columns is not None:
import_table(fd, table, columns, cursor)
else:
try:
print(f"Copying from {path} into {table}")
cursor.copy_from(fd, table)
except psycopg2.errors.UniqueViolation:
print(f"It looks like table {table} may have already been imported. Skipping it.")
print("Reading surveys")
for survey in surveys:
@@ -130,20 +123,17 @@ if __name__ == '__main__':
columns = exportables["survey"][table]
path = os.path.join(pathPrefix, "-"+table)
print(" ←← ", path, " →→ ", table, columns)
try:
with open(path, "rb") as fd:
if columns is not None:
import_table(fd, table, columns, cursor)
else:
try:
print(f"Copying from {path} into {table}")
cursor.copy_from(fd, table)
except psycopg2.errors.UniqueViolation:
print(f"It looks like table {table} may have already been imported. Skipping it.")
except FileNotFoundError:
print(f"File not found. Skipping {path}")
with open(path, "rb") as fd:
if columns is not None:
import_table(fd, table, columns, cursor)
else:
try:
print(f"Copying from {path} into {table}")
cursor.copy_from(fd, table)
except psycopg2.errors.UniqueViolation:
print(f"It looks like table {table} may have already been imported. Skipping it.")
# If we don't commit the data does not actually get copied
db.conn.commit()

View File

@@ -1,89 +0,0 @@
#!/usr/bin/node
const cmp = require('../lib/www/server/lib/comparisons');
async function purgeComparisons () {
const groups = await cmp.groups();
const comparisons = await cmp.getGroup();
const pids = new Set(Object.values(groups).flat().map( p => p.pid ));
const comparison_pids = new Set(comparisons.map( c => [ c.baseline_pid, c.monitor_pid ] ).flat());
for (const pid of comparison_pids) {
if (!pids.has(pid)) {
console.log(`${pid} no longer part of a group. Deleting comparisons`);
const staleComps = comparisons.filter( c => c.baseline_pid == pid || c.monitor_pid == pid );
for (const c of staleComps) {
console.log(`Deleting comparison ${c.baseline_pid}${c.monitor_pid}`);
await cmp.remove(c.baseline_pid, c.monitor_pid);
}
}
}
}
async function main () {
console.log("Looking for unreferenced comparisons to purge");
await purgeComparisons();
console.log("Retrieving project groups");
const groups = await cmp.groups();
if (!Object.keys(groups??{})?.length) {
console.log("No groups found");
return 0;
}
console.log(`Found ${Object.keys(groups)?.length} groups: ${Object.keys(groups).join(", ")}`);
for (const groupName of Object.keys(groups)) {
const projects = groups[groupName];
console.log(`Fetching saved comparisons for ${groupName}`);
const comparisons = await cmp.getGroup(groupName);
if (!comparisons || !comparisons.length) {
console.log(`No comparisons found for ${groupName}`);
continue;
}
// Check if there are any projects that have been modified since last comparison
// or if there are any pairs that are no longer part of the group
const outdated = comparisons.filter( c => {
const baseline_tstamp = projects.find( p => p.pid === c.baseline_pid )?.tstamp;
const monitor_tstamp = projects.find( p => p.pid === c.monitor_pid )?.tstamp;
return (c.tstamp < baseline_tstamp) || (c.tstamp < monitor_tstamp) ||
baseline_tstamp == null || monitor_tstamp == null;
});
for (const comparison of outdated) {
console.log(`Removing stale comparison: ${comparison.baseline_pid}${comparison.monitor_pid}`);
await cmp.remove(comparison.baseline_pid, comparison.monitor_pid);
}
if (projects?.length < 2) {
console.log(`Group ${groupName} has less than two projects. No comparisons are possible`);
continue;
}
// Re-run the comparisons that are not in the database. They may
// be missing either because they were not there to start with
// or because we just removed them due to being stale
console.log(`Recalculating group ${groupName}`);
await cmp.saveGroup(groupName);
}
console.log("Comparisons update done");
return 0;
}
if (require.main === module) {
main();
} else {
module.exports = main;
}

View File

@@ -1,65 +0,0 @@
db:
connection_string: "host=localhost port=5432 dbname=dougal user=postgres"
webhooks:
alert:
url: https://gitlab.com/wgp/dougal/software/alerts/notify.json
authkey: ""
# The authorisation key can be provided here or read from the
# environment variable GITLAB_ALERTS_AUTHKEY. The environment
# variable has precedence. It can be saved under the user's
# Bash .profile. This is the recommended way to avoid accidentally
# committing a security token into the git repository.
navigation:
headers:
-
type: udp
port: 30000
meta:
# Anything here gets passed as options to the packet
# saving routine.
epsg: 23031 # Assume this CRS for unqualified E/N data
# Heuristics to apply to detect survey when offline
offline_survey_heuristics: "nearest_preplot"
# Apply the heuristics at most once every…
offline_survey_detect_interval: 10000 # ms
imports:
# For a file to be imported, it must have been last modified at
# least this many seconds ago.
file_min_age: 60
# These paths refer to remote mounts which must be present in order
# for imports to work. If any of these paths are empty, import actions
# (including data deletion) will be inhibited. This is to cope with
# things like transient network failures.
mounts:
- /srv/mnt/Data
# These paths can be exposed to end users via the API. They should
# contain the locations where project data, or any other user data
# that needs to be accessible by Dougal, is located.
#
# This key can be either a string or an object:
# - If a string, it points to the root path for Dougal-accessible data.
# - If an object, there is an implicit root and the first-level
# paths are denoted by the keys, with the values being their
# respective physical paths.
# Non-absolute paths are relative to $DOUGAL_ROOT.
paths: /srv/mnt/Data
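# For example, the object form might look like this (hypothetical mount
# points shown for illustration only):
#
# paths:
#   projects: /srv/mnt/Data/Projects
#   archive: /srv/mnt/Archive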
queues:
asaqc:
request:
url: "https://api.gateway.equinor.com/vt/v1/api/upload-file-encoded"
args:
method: POST
headers:
Content-Type: application/json
httpsAgent: # The paths here are relative to $DOUGAL_ROOT
cert: etc/ssl/asaqc.crt
key: etc/ssl/asaqc.key

24
etc/config.yaml Normal file
View File

@@ -0,0 +1,24 @@
db:
connection_string: "host=localhost port=5432 dbname=dougal user=postgres"
webhooks:
alert:
url: https://gitlab.com/wgp/dougal/software/alerts/notify.json
authkey: ""
# The authorisation key can be provided here or read from the
# environment variable GITLAB_ALERTS_AUTHKEY. The environment
# variable has precedence. It can be saved under the user's
# Bash .profile. This is the recommended way to avoid accidentally
# committing a security token into the git repository.
navigation:
headers:
-
type: udp
port: 30000
meta:
# Anything here gets passed as options to the packet
# saving routine.
epsg: 23031 # Assume this CRS for unqualified E/N data

View File

@@ -19,124 +19,3 @@ Created with:
```bash
SCHEMA_NAME=survey_X EPSG_CODE=XXXXX $DOUGAL_ROOT/sbin/dump_schema.sh
```
## To create a new Dougal database
Ensure that the following packages are installed:
* `postgresql*-postgis-utils`
* `postgresql*-postgis`
* `postgresql*-contrib` # For B-trees
```bash
psql -U postgres <./database-template.sql
psql -U postgres <./database-version.sql
```
---
# Upgrading PostgreSQL
The following is based on https://en.opensuse.org/SDB:PostgreSQL#Upgrading_major_PostgreSQL_version
```bash
# The following bash code should be checked and executed
# line for line whenever you do an upgrade. The example
# shows the upgrade process from an original installation
# of version 12 up to version 14.
# install the new server as well as the required postgresql-contrib packages:
zypper in postgresql14-server postgresql14-contrib postgresql12-contrib
# If not yet done, copy the configuration: first create a new PostgreSQL configuration directory...
mkdir /etc/postgresql
# and copy the original file to this global directory
cd /srv/pgsql/data
for i in pg_hba.conf pg_ident.conf postgresql.conf postgresql.auto.conf ; do cp -a $i /etc/postgresql/$i ; done
# Now create a new data-directory and initialize it for usage with the new server
install -d -m 0700 -o postgres -g postgres /srv/pgsql/data14
cd /srv/pgsql/data14
sudo -u postgres /usr/lib/postgresql14/bin/initdb .
# replace the newly generated files by a symlink to the global files.
# After doing so, you may check the difference of the created backup files and
# the files from the former installation
for i in pg_hba.conf pg_ident.conf postgresql.conf postgresql.auto.conf ; do old $i ; ln -s /etc/postgresql/$i .; done
# Copy over special thesaurus files if any exist.
#cp -a /usr/share/postgresql12/tsearch_data/my_thesaurus_german.ths /usr/share/postgresql14/tsearch_data/
# Now it's time to disable the service...
systemctl stop postgresql.service
# And to start the migration. Please ensure the directories match your upgrade path
sudo -u postgres /usr/lib/postgresql14/bin/pg_upgrade --link \
--old-bindir="/usr/lib/postgresql12/bin" \
--new-bindir="/usr/lib/postgresql14/bin" \
--old-datadir="/srv/pgsql/data/" \
--new-datadir="/srv/pgsql/data14/"
# NOTE: If getting the following error:
# lc_collate values for database "postgres" do not match: old "en_US.UTF-8", new "C"
# then:
# cd ..
# rm -rf /srv/pgsql/data14
# install -d -m 0700 -o postgres -g postgres /srv/pgsql/data14
# cd /srv/pgsql/data14
# sudo -u postgres /usr/lib/postgresql14/bin/initdb --locale=en_US.UTF-8 .
#
# and repeat the migration command
# After successfully migrating the data...
cd ..
# if not already symlinked move the old data to a versioned directory matching
# your old installation...
mv data data12
# and set a symlink to the new data directory
ln -sf data14/ data
# Now start the new service
systemctl start postgresql.service
# If everything has been successful, you should uninstall old packages...
#zypper rm -u postgresql12 postgresql13
# and remove old data directories
#rm -rf /srv/pgsql/data_OLD_POSTGRES_VERSION_NUMBER
# For good measure:
sudo -u postgres /usr/lib/postgresql14/bin/vacuumdb --all --analyze-in-stages
# If update_extensions.sql exists, apply it.
```
# Restoring from backup
## Whole database
Ensure that nothing is connected to the database.
```bash
psql -U postgres --dbname postgres <<EOF
-- Database: dougal
DROP DATABASE IF EXISTS dougal;
CREATE DATABASE dougal
WITH
OWNER = postgres
ENCODING = 'UTF8'
LC_COLLATE = 'en_GB.UTF-8'
LC_CTYPE = 'en_GB.UTF-8'
TABLESPACE = pg_default
CONNECTION LIMIT = -1;
ALTER DATABASE dougal
SET search_path TO "$user", public, topology;
EOF
# Adjust --jobs according to host machine
pg_restore -U postgres --dbname dougal --clean --if-exists --jobs 32 /path/to/backup
```

View File

@@ -2,8 +2,8 @@
-- PostgreSQL database dump
--
-- Dumped from database version 14.2
-- Dumped by pg_dump version 14.2
-- Dumped from database version 12.4
-- Dumped by pg_dump version 12.4
SET statement_timeout = 0;
SET lock_timeout = 0;
@@ -102,6 +102,20 @@ CREATE EXTENSION IF NOT EXISTS postgis WITH SCHEMA public;
COMMENT ON EXTENSION postgis IS 'PostGIS geometry, geography, and raster spatial types and functions';
--
-- Name: postgis_raster; Type: EXTENSION; Schema: -; Owner: -
--
CREATE EXTENSION IF NOT EXISTS postgis_raster WITH SCHEMA public;
--
-- Name: EXTENSION postgis_raster; Type: COMMENT; Schema: -; Owner:
--
COMMENT ON EXTENSION postgis_raster IS 'PostGIS raster types and functions';
--
-- Name: postgis_sfcgal; Type: EXTENSION; Schema: -; Owner: -
--
@@ -130,221 +144,6 @@ CREATE EXTENSION IF NOT EXISTS postgis_topology WITH SCHEMA topology;
COMMENT ON EXTENSION postgis_topology IS 'PostGIS topology spatial types and functions';
--
-- Name: queue_item_status; Type: TYPE; Schema: public; Owner: postgres
--
CREATE TYPE public.queue_item_status AS ENUM (
'queued',
'cancelled',
'failed',
'sent'
);
ALTER TYPE public.queue_item_status OWNER TO postgres;
--
-- Name: event_meta(timestamp with time zone); Type: FUNCTION; Schema: public; Owner: postgres
--
CREATE FUNCTION public.event_meta(tstamp timestamp with time zone) RETURNS jsonb
LANGUAGE plpgsql
AS $$
BEGIN
RETURN event_meta(tstamp, NULL, NULL);
END;
$$;
ALTER FUNCTION public.event_meta(tstamp timestamp with time zone) OWNER TO postgres;
--
-- Name: FUNCTION event_meta(tstamp timestamp with time zone); Type: COMMENT; Schema: public; Owner: postgres
--
COMMENT ON FUNCTION public.event_meta(tstamp timestamp with time zone) IS 'Overload of event_meta (timestamptz, integer, integer) for use when searching by timestamp.';
--
-- Name: event_meta(integer, integer); Type: FUNCTION; Schema: public; Owner: postgres
--
CREATE FUNCTION public.event_meta(sequence integer, point integer) RETURNS jsonb
LANGUAGE plpgsql
AS $$
BEGIN
RETURN event_meta(NULL, sequence, point);
END;
$$;
ALTER FUNCTION public.event_meta(sequence integer, point integer) OWNER TO postgres;
--
-- Name: FUNCTION event_meta(sequence integer, point integer); Type: COMMENT; Schema: public; Owner: postgres
--
COMMENT ON FUNCTION public.event_meta(sequence integer, point integer) IS 'Overload of event_meta (timestamptz, integer, integer) for use when searching by sequence / point.';
--
-- Name: event_meta(timestamp with time zone, integer, integer); Type: FUNCTION; Schema: public; Owner: postgres
--
CREATE FUNCTION public.event_meta(tstamp timestamp with time zone, sequence integer, point integer) RETURNS jsonb
LANGUAGE plpgsql
AS $$
DECLARE
result jsonb;
-- Tolerance is hard-coded, at least until a need to expose arises.
tolerance numeric;
BEGIN
tolerance := 3; -- seconds
-- We search by timestamp if we can, as that's a lot quicker
IF tstamp IS NOT NULL THEN
SELECT meta
INTO result
FROM real_time_inputs rti
WHERE
rti.tstamp BETWEEN (event_meta.tstamp - tolerance * interval '1 second') AND (event_meta.tstamp + tolerance * interval '1 second')
ORDER BY abs(extract('epoch' FROM rti.tstamp - event_meta.tstamp ))
LIMIT 1;
ELSE
SELECT meta
INTO result
FROM real_time_inputs rti
WHERE
(meta->>'_sequence')::integer = event_meta.sequence AND
(meta->>'_point')::integer = event_meta.point
ORDER BY rti.tstamp DESC
LIMIT 1;
END IF;
RETURN result;
END;
$$;
ALTER FUNCTION public.event_meta(tstamp timestamp with time zone, sequence integer, point integer) OWNER TO postgres;
--
-- Name: FUNCTION event_meta(tstamp timestamp with time zone, sequence integer, point integer); Type: COMMENT; Schema: public; Owner: postgres
--
COMMENT ON FUNCTION public.event_meta(tstamp timestamp with time zone, sequence integer, point integer) IS 'Return the real-time event metadata associated with a sequence / point in the current project or
with a given timestamp. The timestamp is first searched for in the shot tables
of the current prospect or, if not found, in the real-time data.
Returns a JSONB object.';
--
-- Name: geometry_from_tstamp(timestamp with time zone, numeric); Type: FUNCTION; Schema: public; Owner: postgres
--
CREATE FUNCTION public.geometry_from_tstamp(ts timestamp with time zone, tolerance numeric, OUT geometry public.geometry, OUT delta numeric) RETURNS record
LANGUAGE sql
AS $$
SELECT
geometry,
extract('epoch' FROM tstamp - ts ) AS delta
FROM real_time_inputs
WHERE
geometry IS NOT NULL AND
tstamp BETWEEN (ts - tolerance * interval '1 second') AND (ts + tolerance * interval '1 second')
ORDER BY abs(extract('epoch' FROM tstamp - ts ))
LIMIT 1;
$$;
ALTER FUNCTION public.geometry_from_tstamp(ts timestamp with time zone, tolerance numeric, OUT geometry public.geometry, OUT delta numeric) OWNER TO postgres;
--
-- Name: FUNCTION geometry_from_tstamp(ts timestamp with time zone, tolerance numeric, OUT geometry public.geometry, OUT delta numeric); Type: COMMENT; Schema: public; Owner: postgres
--
COMMENT ON FUNCTION public.geometry_from_tstamp(ts timestamp with time zone, tolerance numeric, OUT geometry public.geometry, OUT delta numeric) IS 'Get geometry from timestamp';
--
-- Name: interpolate_geometry_from_tstamp(timestamp with time zone, numeric); Type: FUNCTION; Schema: public; Owner: postgres
--
CREATE FUNCTION public.interpolate_geometry_from_tstamp(ts timestamp with time zone, maxspan numeric) RETURNS public.geometry
LANGUAGE plpgsql
AS $$
DECLARE
ts0 timestamptz;
ts1 timestamptz;
geom0 geometry;
geom1 geometry;
span numeric;
fraction numeric;
BEGIN
SELECT tstamp, geometry
INTO ts0, geom0
FROM real_time_inputs
WHERE tstamp <= ts
ORDER BY tstamp DESC
LIMIT 1;
SELECT tstamp, geometry
INTO ts1, geom1
FROM real_time_inputs
WHERE tstamp >= ts
ORDER BY tstamp ASC
LIMIT 1;
IF geom0 IS NULL OR geom1 IS NULL THEN
RAISE NOTICE 'Interpolation failed (no straddling data)';
RETURN NULL;
END IF;
-- See if we got an exact match
IF ts0 = ts THEN
RETURN geom0;
ELSIF ts1 = ts THEN
RETURN geom1;
END IF;
span := extract('epoch' FROM ts1 - ts0);
IF span > maxspan THEN
RAISE NOTICE 'Interpolation timespan % outside maximum requested (%)', span, maxspan;
RETURN NULL;
END IF;
fraction := extract('epoch' FROM ts - ts0) / span;
IF fraction < 0 OR fraction > 1 THEN
RAISE NOTICE 'Requested timestamp % outside of interpolation span (fraction: %)', ts, fraction;
RETURN NULL;
END IF;
RETURN ST_LineInterpolatePoint(St_MakeLine(geom0, geom1), fraction);
END;
$$;
ALTER FUNCTION public.interpolate_geometry_from_tstamp(ts timestamp with time zone, maxspan numeric) OWNER TO postgres;
--
-- Name: FUNCTION interpolate_geometry_from_tstamp(ts timestamp with time zone, maxspan numeric); Type: COMMENT; Schema: public; Owner: postgres
--
COMMENT ON FUNCTION public.interpolate_geometry_from_tstamp(ts timestamp with time zone, maxspan numeric) IS 'Interpolate a position over a given maximum timespan (in seconds)
based on real-time inputs. Returns a POINT geometry.';
--
-- Name: notify(); Type: FUNCTION; Schema: public; Owner: postgres
--
@@ -383,110 +182,23 @@ $$;
ALTER FUNCTION public.notify() OWNER TO postgres;
--
-- Name: sequence_shot_from_tstamp(timestamp with time zone); Type: FUNCTION; Schema: public; Owner: postgres
--
CREATE FUNCTION public.sequence_shot_from_tstamp(ts timestamp with time zone, OUT sequence numeric, OUT point numeric, OUT delta numeric) RETURNS record
LANGUAGE sql
AS $$
SELECT * FROM public.sequence_shot_from_tstamp(ts, 3);
$$;
ALTER FUNCTION public.sequence_shot_from_tstamp(ts timestamp with time zone, OUT sequence numeric, OUT point numeric, OUT delta numeric) OWNER TO postgres;
--
-- Name: FUNCTION sequence_shot_from_tstamp(ts timestamp with time zone, OUT sequence numeric, OUT point numeric, OUT delta numeric); Type: COMMENT; Schema: public; Owner: postgres
--
COMMENT ON FUNCTION public.sequence_shot_from_tstamp(ts timestamp with time zone, OUT sequence numeric, OUT point numeric, OUT delta numeric) IS 'Get sequence and shotpoint from timestamp.
Overloaded form in which the tolerance value is implied and defaults to three seconds.';
--
-- Name: sequence_shot_from_tstamp(timestamp with time zone, numeric); Type: FUNCTION; Schema: public; Owner: postgres
--
CREATE FUNCTION public.sequence_shot_from_tstamp(ts timestamp with time zone, tolerance numeric, OUT sequence numeric, OUT point numeric, OUT delta numeric) RETURNS record
LANGUAGE sql
AS $$
SELECT
(meta->>'_sequence')::numeric AS sequence,
(meta->>'_point')::numeric AS point,
extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts ) AS delta
FROM real_time_inputs
WHERE
meta ? '_sequence' AND
abs(extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts )) < tolerance
ORDER BY abs(extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts ))
LIMIT 1;
$$;
ALTER FUNCTION public.sequence_shot_from_tstamp(ts timestamp with time zone, tolerance numeric, OUT sequence numeric, OUT point numeric, OUT delta numeric) OWNER TO postgres;
--
-- Name: FUNCTION sequence_shot_from_tstamp(ts timestamp with time zone, tolerance numeric, OUT sequence numeric, OUT point numeric, OUT delta numeric); Type: COMMENT; Schema: public; Owner: postgres
--
COMMENT ON FUNCTION public.sequence_shot_from_tstamp(ts timestamp with time zone, tolerance numeric, OUT sequence numeric, OUT point numeric, OUT delta numeric) IS 'Get sequence and shotpoint from timestamp.
Given a timestamp this function returns the closest shot to it within the given tolerance value.
This uses the `real_time_inputs` table and it does not give an indication of which project the shotpoint belongs to. It is assumed that a single project is being acquired at a given time.';
--
-- Name: set_survey(text); Type: PROCEDURE; Schema: public; Owner: postgres
--
CREATE PROCEDURE public.set_survey(IN project_id text)
CREATE PROCEDURE public.set_survey(project_id text)
LANGUAGE sql
AS $$
SELECT set_config('search_path', (SELECT schema||',public' FROM public.projects WHERE pid = lower(project_id)), false);
$$;
ALTER PROCEDURE public.set_survey(IN project_id text) OWNER TO postgres;
--
-- Name: update_timestamp(); Type: FUNCTION; Schema: public; Owner: postgres
--
CREATE FUNCTION public.update_timestamp() RETURNS trigger
LANGUAGE plpgsql
AS $$
BEGIN
IF NEW.updated_on IS NOT NULL THEN
NEW.updated_on := current_timestamp;
END IF;
RETURN NEW;
EXCEPTION
WHEN undefined_column THEN RETURN NEW;
END;
$$;
ALTER FUNCTION public.update_timestamp() OWNER TO postgres;
ALTER PROCEDURE public.set_survey(project_id text) OWNER TO postgres;
SET default_tablespace = '';
SET default_table_access_method = heap;
--
-- Name: info; Type: TABLE; Schema: public; Owner: postgres
--
CREATE TABLE public.info (
key text NOT NULL,
value jsonb
);
ALTER TABLE public.info OWNER TO postgres;
--
-- Name: projects; Type: TABLE; Schema: public; Owner: postgres
--
@@ -501,46 +213,6 @@ CREATE TABLE public.projects (
ALTER TABLE public.projects OWNER TO postgres;
--
-- Name: queue_items; Type: TABLE; Schema: public; Owner: postgres
--
CREATE TABLE public.queue_items (
item_id integer NOT NULL,
status public.queue_item_status DEFAULT 'queued'::public.queue_item_status NOT NULL,
payload jsonb NOT NULL,
results jsonb DEFAULT '{}'::jsonb NOT NULL,
created_on timestamp with time zone DEFAULT CURRENT_TIMESTAMP NOT NULL,
updated_on timestamp with time zone DEFAULT CURRENT_TIMESTAMP NOT NULL,
not_before timestamp with time zone DEFAULT '1970-01-01 00:00:00+00'::timestamp with time zone NOT NULL,
parent_id integer
);
ALTER TABLE public.queue_items OWNER TO postgres;
--
-- Name: queue_items_item_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
--
CREATE SEQUENCE public.queue_items_item_id_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER TABLE public.queue_items_item_id_seq OWNER TO postgres;
--
-- Name: queue_items_item_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
--
ALTER SEQUENCE public.queue_items_item_id_seq OWNED BY public.queue_items.item_id;
--
-- Name: real_time_inputs; Type: TABLE; Schema: public; Owner: postgres
--
@@ -554,21 +226,6 @@ CREATE TABLE public.real_time_inputs (
ALTER TABLE public.real_time_inputs OWNER TO postgres;
--
-- Name: queue_items item_id; Type: DEFAULT; Schema: public; Owner: postgres
--
ALTER TABLE ONLY public.queue_items ALTER COLUMN item_id SET DEFAULT nextval('public.queue_items_item_id_seq'::regclass);
--
-- Name: info info_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres
--
ALTER TABLE ONLY public.info
ADD CONSTRAINT info_pkey PRIMARY KEY (key);
--
-- Name: projects projects_name_key; Type: CONSTRAINT; Schema: public; Owner: postgres
--
@@ -593,14 +250,6 @@ ALTER TABLE ONLY public.projects
ADD CONSTRAINT projects_schema_key UNIQUE (schema);
--
-- Name: queue_items queue_items_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres
--
ALTER TABLE ONLY public.queue_items
ADD CONSTRAINT queue_items_pkey PRIMARY KEY (item_id);
--
-- Name: tstamp_idx; Type: INDEX; Schema: public; Owner: postgres
--
@@ -608,13 +257,6 @@ ALTER TABLE ONLY public.queue_items
CREATE INDEX tstamp_idx ON public.real_time_inputs USING btree (tstamp DESC);
--
-- Name: info info_tg; Type: TRIGGER; Schema: public; Owner: postgres
--
CREATE TRIGGER info_tg AFTER INSERT OR DELETE OR UPDATE ON public.info FOR EACH ROW EXECUTE FUNCTION public.notify('info');
--
-- Name: projects projects_tg; Type: TRIGGER; Schema: public; Owner: postgres
--
@@ -622,20 +264,6 @@ CREATE TRIGGER info_tg AFTER INSERT OR DELETE OR UPDATE ON public.info FOR EACH
CREATE TRIGGER projects_tg AFTER INSERT OR DELETE OR UPDATE ON public.projects FOR EACH ROW EXECUTE FUNCTION public.notify('project');
--
-- Name: queue_items queue_items_tg0; Type: TRIGGER; Schema: public; Owner: postgres
--
CREATE TRIGGER queue_items_tg0 BEFORE INSERT OR UPDATE ON public.queue_items FOR EACH ROW EXECUTE FUNCTION public.update_timestamp();
--
-- Name: queue_items queue_items_tg1; Type: TRIGGER; Schema: public; Owner: postgres
--
CREATE TRIGGER queue_items_tg1 AFTER INSERT OR DELETE OR UPDATE ON public.queue_items FOR EACH ROW EXECUTE FUNCTION public.notify('queue_items');
--
-- Name: real_time_inputs real_time_inputs_tg; Type: TRIGGER; Schema: public; Owner: postgres
--
@@ -643,14 +271,6 @@ CREATE TRIGGER queue_items_tg1 AFTER INSERT OR DELETE OR UPDATE ON public.queue_
CREATE TRIGGER real_time_inputs_tg AFTER INSERT ON public.real_time_inputs FOR EACH ROW EXECUTE FUNCTION public.notify('realtime');
--
-- Name: queue_items queue_items_parent_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres
--
ALTER TABLE ONLY public.queue_items
ADD CONSTRAINT queue_items_parent_id_fkey FOREIGN KEY (parent_id) REFERENCES public.queue_items(item_id);
--
-- PostgreSQL database dump complete
--

View File

@@ -1,5 +0,0 @@
\connect dougal
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.4.5"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.4.5"}' WHERE public.info.key = 'version';

File diff suppressed because it is too large Load Diff

View File

@@ -1,34 +0,0 @@
# Database schema upgrades
When the database schema needs to be upgraded in order to provide new functionality, fix errors, etc., an upgrade script should be added to this directory.
The script can be SQL (preferred) or anything else (Bash, Python, …) in the event of complex upgrades.
The script itself should:
* document what the intended changes are;
* contain instructions on how to run it;
* make the user aware of any non-obvious side effects; and
* say if it is safe to run the script multiple times on the same schema / database.
## Naming
Script files should be named `upgrade-<index>-<commit-id-old>-<commit-id-new>-v<schema-version>.sql`, where:
* `<index>` is a sequential two-digit index. When it reaches 99, existing files will be renamed to a three-digit index (001-099) and new files will use three digits.
* `<commit-id-old>` is the ID of the Git commit that last introduced a schema change.
* `<commit-id-new>` is the ID of the first Git commit expecting the updated schema.
* `<schema-version>` is the version of the schema.
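For example, a (hypothetical) second upgrade script taking the schema from commit `6e7ba82e` to commit `53f71f70` at schema version `0.1.2` would be named `upgrade-02-6e7ba82e-53f71f70-v0.1.2.sql`.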
Note: the `<schema-version>` value should be updated with every change and it should be the same as reported by:
```sql
select value->>'db_schema' as db_schema from public.info where key = 'version';
```
If necessary, the wanted schema version must also be updated in `package.json`.
## Running
Schema upgrades are always run manually.

View File

@@ -1,22 +0,0 @@
-- Upgrade the database from commit 78adb2be to 7917eeeb.
--
-- This upgrade affects the `public` schema only.
--
-- It creates a new table, `info`, for storing arbitrary JSON
-- data not belonging to a specific project. Currently used
-- for the equipment list, it could also serve to store user
-- details, configuration settings, system state, etc.
--
-- To apply, run as the dougal user:
--
-- psql < $THIS_FILE
--
-- NOTE: It will fail harmlessly if applied twice.
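--
-- Once created, arbitrary key/value data can be stored and queried, e.g.
-- (hypothetical key shown for illustration):
--
--   INSERT INTO public.info VALUES ('equipment', '{}'::jsonb)
--   ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value;
--   SELECT value FROM public.info WHERE key = 'equipment';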
CREATE TABLE IF NOT EXISTS public.info (
key text NOT NULL primary key,
value jsonb
);
CREATE TRIGGER info_tg AFTER INSERT OR DELETE OR UPDATE ON public.info FOR EACH ROW EXECUTE FUNCTION public.notify('info');

View File

@@ -1,160 +0,0 @@
-- Upgrade the database from commit 6e7ba82e to 53f71f70.
--
-- NOTE: This upgrade must be applied to every schema in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This merges two changes to the database.
-- The first one (commit 5de64e6b) modifies the `event` view to return
-- the `meta` column of timed and sequence events.
-- The second one (commit 53f71f70) adds a primary key constraint to
-- events_seq_labels (there is already an equivalent constraint on
-- events_seq_timed).
--
-- To apply, run as the dougal user, for every schema in the database:
--
-- psql <<EOF
-- SET search_path TO survey_*,public;
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It will fail harmlessly if applied twice.
BEGIN;
DROP VIEW events_seq_timed CASCADE; -- Brings down events too
ALTER TABLE ONLY events_seq_labels
ADD CONSTRAINT events_seq_labels_pkey PRIMARY KEY (id, label);
CREATE OR REPLACE VIEW events_seq_timed AS
SELECT s.sequence,
s.point,
s.id,
s.remarks,
rs.line,
rs.objref,
rs.tstamp,
rs.hash,
s.meta,
rs.geometry
FROM (events_seq s
LEFT JOIN raw_shots rs USING (sequence, point));
CREATE OR REPLACE VIEW events AS
WITH qc AS (
SELECT rs.sequence,
rs.point,
ARRAY[jsonb_array_elements_text(q.labels)] AS labels
FROM raw_shots rs,
LATERAL jsonb_path_query(rs.meta, '$."qc".*."labels"'::jsonpath) q(labels)
)
SELECT 'sequence'::text AS type,
false AS virtual,
s.sequence,
s.point,
s.id,
s.remarks,
s.line,
s.objref,
s.tstamp,
s.hash,
s.meta,
(public.st_asgeojson(public.st_transform(s.geometry, 4326)))::jsonb AS geometry,
ARRAY( SELECT esl.label
FROM events_seq_labels esl
WHERE (esl.id = s.id)) AS labels
FROM events_seq_timed s
UNION
SELECT 'timed'::text AS type,
false AS virtual,
rs.sequence,
rs.point,
t.id,
t.remarks,
rs.line,
rs.objref,
t.tstamp,
rs.hash,
t.meta,
(t.meta -> 'geometry'::text) AS geometry,
ARRAY( SELECT etl.label
FROM events_timed_labels etl
WHERE (etl.id = t.id)) AS labels
FROM ((events_timed t
LEFT JOIN events_timed_seq ts USING (id))
LEFT JOIN raw_shots rs USING (sequence, point))
UNION
SELECT 'midnight shot'::text AS type,
true AS virtual,
v1.sequence,
v1.point,
((v1.sequence * 100000) + v1.point) AS id,
''::text AS remarks,
v1.line,
v1.objref,
v1.tstamp,
v1.hash,
'{}'::jsonb meta,
(public.st_asgeojson(public.st_transform(v1.geometry, 4326)))::jsonb AS geometry,
ARRAY[v1.label] AS labels
FROM events_midnight_shot v1
UNION
SELECT 'qc'::text AS type,
true AS virtual,
rs.sequence,
rs.point,
((10000000 + (rs.sequence * 100000)) + rs.point) AS id,
(q.remarks)::text AS remarks,
rs.line,
rs.objref,
rs.tstamp,
rs.hash,
'{}'::jsonb meta,
(public.st_asgeojson(public.st_transform(rs.geometry, 4326)))::jsonb AS geometry,
('{QC}'::text[] || qc.labels) AS labels
FROM (raw_shots rs
LEFT JOIN qc USING (sequence, point)),
LATERAL jsonb_path_query(rs.meta, '$."qc".*."results"'::jsonpath) q(remarks)
WHERE (rs.meta ? 'qc'::text);
CREATE OR REPLACE VIEW final_lines_summary AS
WITH summary AS (
SELECT DISTINCT fs.sequence,
first_value(fs.point) OVER w AS fsp,
last_value(fs.point) OVER w AS lsp,
first_value(fs.tstamp) OVER w AS ts0,
last_value(fs.tstamp) OVER w AS ts1,
count(fs.point) OVER w AS num_points,
public.st_distance(first_value(fs.geometry) OVER w, last_value(fs.geometry) OVER w) AS length,
((public.st_azimuth(first_value(fs.geometry) OVER w, last_value(fs.geometry) OVER w) * (180)::double precision) / pi()) AS azimuth
FROM final_shots fs
WINDOW w AS (PARTITION BY fs.sequence ORDER BY fs.tstamp ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
)
SELECT fl.sequence,
fl.line,
s.fsp,
s.lsp,
s.ts0,
s.ts1,
(s.ts1 - s.ts0) AS duration,
s.num_points,
(( SELECT count(*) AS count
FROM preplot_points
WHERE ((preplot_points.line = fl.line) AND (((preplot_points.point >= s.fsp) AND (preplot_points.point <= s.lsp)) OR ((preplot_points.point >= s.lsp) AND (preplot_points.point <= s.fsp))))) - s.num_points) AS missing_shots,
s.length,
s.azimuth,
fl.remarks,
fl.meta
FROM (summary s
JOIN final_lines fl USING (sequence));
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,171 +0,0 @@
-- Upgrade the database from commit 53f71f70 to 4d977848.
--
-- NOTE: This upgrade must be applied to every schema in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This adds:
--
-- * label_in_sequence (_sequence integer, _label text):
-- Returns events containing the specified label.
--
-- * handle_final_line_events (_seq integer, _label text, _column text):
-- - If _label does not exist in the events for sequence _seq:
-- it adds a new _label label at the shotpoint obtained from
-- final_lines_summary[_column].
-- - If _label does exist (and hasn't been auto-added by this function
-- in a previous run), it will add information about it to the final
-- line's metadata.
--
-- * final_line_post_import (_seq integer):
-- Calls handle_final_line_events() on the given sequence to check
-- for FSP, FGSP, LGSP and LSP labels.
--
-- * events_seq_labels_single ():
-- Trigger function to ensure that labels that have the attribute
-- `model.multiple` set to `false` occur at most only once per
-- sequence. If a new instance is added to a sequence, the previous
-- instance is deleted.
--
-- * Trigger on events_seq_labels that calls events_seq_labels_single().
--
-- * Trigger on events_timed_labels that calls events_seq_labels_single().
--
-- To apply, run as the dougal user, for every schema in the database:
--
-- psql <<EOF
-- SET search_path TO survey_*,public;
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It will fail harmlessly if applied twice.
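--
-- After applying, the new routines can be exercised manually, e.g.
-- (illustrative sequence number):
--
--   CALL final_line_post_import(1042);
--   SELECT * FROM label_in_sequence(1042, 'FSP');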
BEGIN;
CREATE OR REPLACE FUNCTION label_in_sequence (_sequence integer, _label text)
RETURNS events
LANGUAGE sql
AS $$
SELECT * FROM events WHERE sequence = _sequence AND _label = ANY(labels);
$$;
CREATE OR REPLACE PROCEDURE handle_final_line_events (_seq integer, _label text, _column text)
LANGUAGE plpgsql
AS $$
DECLARE
_line final_lines_summary%ROWTYPE;
_column_value integer;
_tg_name text := 'final_line';
_event events%ROWTYPE;
event_id integer;
BEGIN
SELECT * INTO _line FROM final_lines_summary WHERE sequence = _seq;
_event := label_in_sequence(_seq, _label);
_column_value := row_to_json(_line)->>_column;
--RAISE NOTICE '% is %', _label, _event;
--RAISE NOTICE 'Line is %', _line;
--RAISE NOTICE '% is % (%)', _column, _column_value, _label;
IF _event IS NULL THEN
--RAISE NOTICE 'We will populate the event log from the sequence data';
SELECT id INTO event_id FROM events_seq WHERE sequence = _seq AND point = _column_value ORDER BY id LIMIT 1;
IF event_id IS NULL THEN
--RAISE NOTICE '… but there is no existing event so we create a new one for sequence % and point %', _line.sequence, _column_value;
INSERT INTO events_seq (sequence, point, remarks)
VALUES (_line.sequence, _column_value, format('%s %s', _label, (SELECT meta->>'lineName' FROM final_lines WHERE sequence = _seq)))
RETURNING id INTO event_id;
--RAISE NOTICE 'Created event_id %', event_id;
END IF;
--RAISE NOTICE 'Remove any other auto-inserted % labels in sequence %', _label, _seq;
DELETE FROM events_seq_labels
WHERE label = _label AND id = (SELECT id FROM events_seq WHERE sequence = _seq AND meta->'auto' ? _label);
--RAISE NOTICE 'We now add a label to the event (id, label) = (%, %)', event_id, _label;
INSERT INTO events_seq_labels (id, label) VALUES (event_id, _label) ON CONFLICT ON CONSTRAINT events_seq_labels_pkey DO NOTHING;
--RAISE NOTICE 'And also clear the %: % flag from meta.auto for any existing events for sequence %', _label, _tg_name, _seq;
UPDATE events_seq
SET meta = meta #- ARRAY['auto', _label]
WHERE meta->'auto' ? _label AND sequence = _seq AND id <> event_id;
--RAISE NOTICE 'Finally, flag the event as having been had label % auto-created by %', _label, _tg_name;
UPDATE events_seq
SET meta = jsonb_set(jsonb_set(meta, '{auto}', COALESCE(meta->'auto', '{}')), ARRAY['auto', _label], to_jsonb(_tg_name))
WHERE id = event_id;
ELSE
--RAISE NOTICE 'We may populate the sequence meta from the event log';
--RAISE NOTICE 'Unless the event log was populated by us previously';
--RAISE NOTICE 'Populated by us previously? %', _event.meta->'auto'->>_label = _tg_name;
IF _event.meta->'auto'->>_label IS DISTINCT FROM _tg_name THEN
--RAISE NOTICE 'Adding % found in events log to final_line meta', _label;
UPDATE final_lines
SET meta = jsonb_set(meta, ARRAY[_label], to_jsonb(_event.point))
WHERE sequence = _seq;
--RAISE NOTICE 'Clearing the %: % flag from meta.auto for any existing events in sequence %', _label, _tg_name, _seq;
UPDATE events_seq
SET meta = meta #- ARRAY['auto', _label]
WHERE sequence = _seq AND meta->'auto'->>_label = _tg_name;
END IF;
END IF;
END;
$$;
CREATE OR REPLACE PROCEDURE final_line_post_import (_seq integer)
LANGUAGE plpgsql
AS $$
BEGIN
CALL handle_final_line_events(_seq, 'FSP', 'fsp');
CALL handle_final_line_events(_seq, 'FGSP', 'fsp');
CALL handle_final_line_events(_seq, 'LGSP', 'lsp');
CALL handle_final_line_events(_seq, 'LSP', 'lsp');
END;
$$;
CREATE OR REPLACE FUNCTION events_seq_labels_single ()
RETURNS trigger
LANGUAGE plpgsql
AS $$
DECLARE _sequence integer;
BEGIN
IF EXISTS(SELECT 1 FROM labels WHERE name = NEW.label AND (data->'model'->'multiple')::boolean IS FALSE) THEN
SELECT sequence INTO _sequence FROM events WHERE id = NEW.id;
DELETE
FROM events_seq_labels
WHERE
id <> NEW.id
AND label = NEW.label
AND id IN (SELECT id FROM events_seq WHERE sequence = _sequence);
DELETE
FROM events_timed_labels
WHERE
id <> NEW.id
AND label = NEW.label
AND id IN (SELECT id FROM events_timed_seq WHERE sequence = _sequence);
END IF;
RETURN NULL;
END;
$$;
CREATE TRIGGER events_seq_labels_single_tg AFTER INSERT OR UPDATE ON events_seq_labels FOR EACH ROW EXECUTE FUNCTION events_seq_labels_single();
CREATE TRIGGER events_seq_labels_single_tg AFTER INSERT OR UPDATE ON events_timed_labels FOR EACH ROW EXECUTE FUNCTION events_seq_labels_single();
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,94 +0,0 @@
-- Upgrade the database from commit 4d977848 to 3d70a460.
--
-- NOTE: This upgrade must be applied to every schema in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This adds the `meta` column to the output of the following views:
--
-- * raw_lines_summary; and
-- * sequences_summary
--
-- To apply, run as the dougal user, for every schema in the database:
--
-- psql <<EOF
-- SET search_path TO survey_*,public;
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
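--
-- After applying, the `meta` column is exposed by both views, e.g.:
--
--   SELECT sequence, line, meta FROM raw_lines_summary;
--   SELECT sequence, meta, meta_final FROM sequences_summary;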
BEGIN;
CREATE OR REPLACE VIEW raw_lines_summary AS
WITH summary AS (
SELECT DISTINCT rs.sequence,
first_value(rs.point) OVER w AS fsp,
last_value(rs.point) OVER w AS lsp,
first_value(rs.tstamp) OVER w AS ts0,
last_value(rs.tstamp) OVER w AS ts1,
count(rs.point) OVER w AS num_points,
count(pp.point) OVER w AS num_preplots,
public.st_distance(first_value(rs.geometry) OVER w, last_value(rs.geometry) OVER w) AS length,
((public.st_azimuth(first_value(rs.geometry) OVER w, last_value(rs.geometry) OVER w) * (180)::double precision) / pi()) AS azimuth
FROM (raw_shots rs
LEFT JOIN preplot_points pp USING (line, point))
WINDOW w AS (PARTITION BY rs.sequence ORDER BY rs.tstamp ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
)
SELECT rl.sequence,
rl.line,
s.fsp,
s.lsp,
s.ts0,
s.ts1,
(s.ts1 - s.ts0) AS duration,
s.num_points,
s.num_preplots,
(( SELECT count(*) AS count
FROM preplot_points
WHERE ((preplot_points.line = rl.line) AND (((preplot_points.point >= s.fsp) AND (preplot_points.point <= s.lsp)) OR ((preplot_points.point >= s.lsp) AND (preplot_points.point <= s.fsp))))) - s.num_preplots) AS missing_shots,
s.length,
s.azimuth,
rl.remarks,
rl.ntbp,
rl.meta
FROM (summary s
JOIN raw_lines rl USING (sequence));
DROP VIEW sequences_summary;
CREATE OR REPLACE VIEW sequences_summary AS
SELECT rls.sequence,
rls.line,
rls.fsp,
rls.lsp,
fls.fsp AS fsp_final,
fls.lsp AS lsp_final,
rls.ts0,
rls.ts1,
fls.ts0 AS ts0_final,
fls.ts1 AS ts1_final,
rls.duration,
fls.duration AS duration_final,
rls.num_preplots,
COALESCE(fls.num_points, rls.num_points) AS num_points,
COALESCE(fls.missing_shots, rls.missing_shots) AS missing_shots,
COALESCE(fls.length, rls.length) AS length,
COALESCE(fls.azimuth, rls.azimuth) AS azimuth,
rls.remarks,
fls.remarks AS remarks_final,
rls.meta,
fls.meta AS meta_final,
CASE
WHEN (rls.ntbp IS TRUE) THEN 'ntbp'::text
WHEN (fls.sequence IS NULL) THEN 'raw'::text
ELSE 'final'::text
END AS status
FROM (raw_lines_summary rls
LEFT JOIN final_lines_summary fls USING (sequence));
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,33 +0,0 @@
-- Upgrade the database from commit 3d70a460 to 0983abac.
--
-- NOTE: This upgrade must be applied to every schema in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This:
--
-- * makes the primary key on planned_lines deferrable; and
-- * changes the planned_lines trigger from statement to row.
--
-- To apply, run as the dougal user, for every schema in the database:
--
-- psql <<EOF
-- SET search_path TO survey_*,public;
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
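--
-- With the constraint deferrable, planned sequences can be renumbered in bulk
-- within a transaction, e.g. (illustrative):
--
--   BEGIN;
--   SET CONSTRAINTS planned_lines_pkey DEFERRED;
--   UPDATE planned_lines SET sequence = sequence + 1;
--   COMMIT;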
BEGIN;
ALTER TABLE planned_lines DROP CONSTRAINT planned_lines_pkey;
ALTER TABLE planned_lines ADD CONSTRAINT planned_lines_pkey PRIMARY KEY (sequence) DEFERRABLE;
DROP TRIGGER planned_lines_tg ON planned_lines;
CREATE TRIGGER planned_lines_tg AFTER INSERT OR DELETE OR UPDATE ON planned_lines FOR EACH ROW EXECUTE FUNCTION public.notify('planned_lines');
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,207 +0,0 @@
-- Upgrade the database from commit 0983abac to 81d9ea19.
--
-- NOTE: This upgrade must be applied to every schema in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This defines a new procedure adjust_planner() which resolves some
-- conflicts between shot sequences and the planner, such as removing
-- sequences that have been shot, renumbering, or adjusting the planned
-- times.
--
-- It is meant to be called at regular intervals by an external process,
-- such as the runner (software/bin/runner.sh).
--
-- A trigger for changes to the schema's `info` table is also added.
--
-- To apply, run as the dougal user, for every schema in the database:
--
-- psql <<EOF
-- SET search_path TO survey_*,public;
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
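--
-- After applying, the procedure can also be invoked manually for the schema
-- currently on the search path:
--
--   CALL adjust_planner();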
BEGIN;
CREATE OR REPLACE PROCEDURE adjust_planner ()
LANGUAGE plpgsql
AS $$
DECLARE
_planner_config jsonb;
_planned_line planned_lines%ROWTYPE;
_lag interval;
_last_sequence sequences_summary%ROWTYPE;
_deltatime interval;
_shotinterval interval;
_tstamp timestamptz;
_incr integer;
BEGIN
SET CONSTRAINTS planned_lines_pkey DEFERRED;
SELECT data->'planner'
INTO _planner_config
FROM file_data
WHERE data ? 'planner';
SELECT *
INTO _last_sequence
FROM sequences_summary
ORDER BY sequence DESC
LIMIT 1;
SELECT *
INTO _planned_line
FROM planned_lines
WHERE sequence = _last_sequence.sequence AND line = _last_sequence.line;
SELECT
COALESCE(
((lead(ts0) OVER (ORDER BY sequence)) - ts1),
make_interval(mins => (_planner_config->>'defaultLineChangeDuration')::integer)
)
INTO _lag
FROM planned_lines
WHERE sequence = _last_sequence.sequence AND line = _last_sequence.line;
_incr = sign(_last_sequence.lsp - _last_sequence.fsp);
RAISE NOTICE '_planner_config: %', _planner_config;
RAISE NOTICE '_last_sequence: %', _last_sequence;
RAISE NOTICE '_planned_line: %', _planned_line;
RAISE NOTICE '_incr: %', _incr;
-- Does the latest sequence match a planned sequence?
IF _planned_line IS NULL THEN -- No it doesn't
RAISE NOTICE 'Latest sequence shot does not match a planned sequence';
SELECT * INTO _planned_line FROM planned_lines ORDER BY sequence ASC LIMIT 1;
RAISE NOTICE '_planned_line: %', _planned_line;
IF _planned_line.sequence <= _last_sequence.sequence THEN
RAISE NOTICE 'Renumbering the planned sequences starting from %', _planned_line.sequence + 1;
-- Renumber the planned sequences starting from last shot sequence number + 1
UPDATE planned_lines
SET sequence = sequence + _last_sequence.sequence - _planned_line.sequence + 1;
END IF;
-- The correction to make to the first planned line's ts0 will be based on either the last
-- sequence's EOL + default line change time or the current time, whichever is later.
_deltatime := GREATEST(COALESCE(_last_sequence.ts1_final, _last_sequence.ts1) + make_interval(mins => (_planner_config->>'defaultLineChangeDuration')::integer), current_timestamp) - _planned_line.ts0;
-- Is the first of the planned lines start time in the past? (±5 mins)
IF _planned_line.ts0 < (current_timestamp - make_interval(mins => 5)) THEN
RAISE NOTICE 'First planned line is in the past. Adjusting times by %', _deltatime;
-- Adjust the start / end time of the planned lines by assuming that we are at
-- `defaultLineChangeDuration` minutes away from SOL of the first planned line.
UPDATE planned_lines
SET
ts0 = ts0 + _deltatime,
ts1 = ts1 + _deltatime;
END IF;
ELSE -- Yes it does
RAISE NOTICE 'Latest sequence does match a planned sequence: %, %', _planned_line.sequence, _planned_line.line;
-- Is it online?
IF EXISTS(SELECT 1 FROM raw_lines_files WHERE sequence = _last_sequence.sequence AND hash = '*online*') THEN
-- Yes it is
RAISE NOTICE 'Sequence % is online', _last_sequence.sequence;
-- Let us get the SOL from the events log if we can
RAISE NOTICE 'Trying to set fsp, ts0 from events log FSP, FGSP';
WITH e AS (
SELECT * FROM events
WHERE
sequence = _last_sequence.sequence
AND ('FSP' = ANY(labels) OR 'FGSP' = ANY(labels))
ORDER BY tstamp LIMIT 1
)
UPDATE planned_lines
SET
fsp = COALESCE(e.point, fsp),
ts0 = COALESCE(e.tstamp, ts0)
FROM e
WHERE planned_lines.sequence = _last_sequence.sequence;
-- Shot interval
_shotinterval := (_last_sequence.ts1 - _last_sequence.ts0) / abs(_last_sequence.lsp - _last_sequence.fsp);
RAISE NOTICE 'Estimating EOL from current shot interval: %', _shotinterval;
SELECT (abs(lsp-fsp) * _shotinterval + ts0) - ts1
INTO _deltatime
FROM planned_lines
WHERE sequence = _last_sequence.sequence;
---- Set ts1 for the current sequence
--UPDATE planned_lines
--SET
--ts1 = (abs(lsp-fsp) * _shotinterval) + ts0
--WHERE sequence = _last_sequence.sequence;
RAISE NOTICE 'Adjustment is %', _deltatime;
IF abs(EXTRACT(EPOCH FROM _deltatime)) < 8 THEN
RAISE NOTICE 'Adjustment too small (< 8 s), so not applying it';
RETURN;
END IF;
-- Adjust ts1 for the current sequence
UPDATE planned_lines
SET ts1 = ts1 + _deltatime
WHERE sequence = _last_sequence.sequence;
-- Now shift all sequences after
UPDATE planned_lines
SET ts0 = ts0 + _deltatime, ts1 = ts1 + _deltatime
WHERE sequence > _last_sequence.sequence;
RAISE NOTICE 'Deleting planned sequences before %', _planned_line.sequence;
-- Remove all previous planner entries.
DELETE
FROM planned_lines
WHERE sequence < _last_sequence.sequence;
ELSE
-- No it isn't
RAISE NOTICE 'Sequence % is offline', _last_sequence.sequence;
-- We were supposed to finish at _planned_line.ts1 but we finished at:
_tstamp := GREATEST(COALESCE(_last_sequence.ts1_final, _last_sequence.ts1), current_timestamp);
-- WARNING Next line is for testing only
--_tstamp := COALESCE(_last_sequence.ts1_final, _last_sequence.ts1);
-- So we need to adjust timestamps by:
_deltatime := _tstamp - _planned_line.ts1;
RAISE NOTICE 'Planned end: %, actual end: % (%, %)', _planned_line.ts1, _tstamp, _planned_line.sequence, _last_sequence.sequence;
RAISE NOTICE 'Shifting times by % for sequences > %', _deltatime, _planned_line.sequence;
-- NOTE: This won't work if sequences are not, err… sequential.
-- NOTE: This has been known to happen in 2020.
UPDATE planned_lines
SET
ts0 = ts0 + _deltatime,
ts1 = ts1 + _deltatime
WHERE sequence > _planned_line.sequence;
RAISE NOTICE 'Deleting planned sequences up to %', _planned_line.sequence;
-- Remove all previous planner entries.
DELETE
FROM planned_lines
WHERE sequence <= _last_sequence.sequence;
END IF;
END IF;
END;
$$;
DROP TRIGGER IF EXISTS info_tg ON info;
CREATE TRIGGER info_tg AFTER INSERT OR DELETE OR UPDATE ON info FOR EACH ROW EXECUTE FUNCTION public.notify('info');
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,91 +0,0 @@
-- Upgrade the database from commit 81d9ea19 to 0a10c897.
--
-- NOTE: This upgrade must be applied to every schema in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This defines a new function ij_error(line, point, geometry) which
-- returns the crossline and inline distance (in metres) between the
-- geometry (which must be a point) and the preplot corresponding to
-- line / point.
--
-- To apply, run as the dougal user, for every schema in the database:
--
-- psql <<EOF
-- SET search_path TO survey_*,public;
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
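--
-- After applying, the function and view can be queried, e.g.
-- (illustrative sequence number):
--
--   SELECT ij_error(line, point, geometry) FROM raw_shots LIMIT 1;
--   SELECT * FROM sequences_detail WHERE sequence = 42;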
BEGIN;
-- Return the crossline, inline error of `geom` with respect to `line` and `point`
-- in the project's binning grid.
CREATE OR REPLACE FUNCTION ij_error(line double precision, point double precision, geom public.geometry)
RETURNS public.geometry(Point, 0)
LANGUAGE plpgsql STABLE LEAKPROOF
AS $$
DECLARE
bp jsonb := binning_parameters();
ij public.geometry := to_binning_grid(geom, bp);
theta numeric := (bp->>'theta')::numeric * pi() / 180;
I_inc numeric DEFAULT 1;
J_inc numeric DEFAULT 1;
I_width numeric := (bp->>'I_width')::numeric;
J_width numeric := (bp->>'J_width')::numeric;
a numeric := (I_inc/I_width) * cos(theta);
b numeric := (I_inc/I_width) * -sin(theta);
c numeric := (J_inc/J_width) * sin(theta);
d numeric := (J_inc/J_width) * cos(theta);
xoff numeric := (bp->'origin'->>'I')::numeric;
yoff numeric := (bp->'origin'->>'J')::numeric;
E0 numeric := (bp->'origin'->>'easting')::numeric;
N0 numeric := (bp->'origin'->>'northing')::numeric;
error_i double precision;
error_j double precision;
BEGIN
error_i := (public.st_x(ij) - line) * I_width;
error_j := (public.st_y(ij) - point) * J_width;
RETURN public.ST_MakePoint(error_i, error_j);
END
$$;
-- Return the list of points and metadata for all sequences.
-- Only points which have a corresponding preplot are returned.
-- If available, final positions are returned as well, if not they
-- are NULL.
-- Likewise, crossline / inline errors are also returned as a PostGIS
-- 2D point both for raw and final data.
CREATE OR REPLACE VIEW sequences_detail AS
SELECT
rl.sequence, rl.line AS sailline,
rs.line, rs.point,
rs.tstamp,
rs.objref objRefRaw, fs.objref objRefFinal,
ST_Transform(pp.geometry, 4326) geometryPreplot,
ST_Transform(rs.geometry, 4326) geometryRaw,
ST_Transform(fs.geometry, 4326) geometryFinal,
ij_error(rs.line, rs.point, rs.geometry) errorRaw,
ij_error(rs.line, rs.point, fs.geometry) errorFinal,
json_build_object('preplot', pp.meta, 'raw', rs.meta, 'final', fs.meta) meta
FROM
raw_lines rl
INNER JOIN raw_shots rs USING (sequence)
INNER JOIN preplot_points pp ON rs.line = pp.line AND rs.point = pp.point
LEFT JOIN final_shots fs ON rl.sequence = fs.sequence AND rs.point = fs.point;
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,75 +0,0 @@
-- Upgrade the database from commit 81d9ea19 to 74b3de5c.
--
-- This upgrade affects the `public` schema only.
--
-- It creates a new table, `queue_items`, for storing
-- requests and responses related to inter-API communication.
-- At the moment this means Equinor's ASAQC API, but it
-- should be applicable to others as well if the need
-- arises.
--
-- As well as the table, it adds:
--
-- * `queue_item_status`, an ENUM type.
-- * `update_timestamp`, a trigger function.
-- * Two triggers on `queue_items`.
--
-- To apply, run as the dougal user:
--
-- psql < $THIS_FILE
--
-- NOTE: It will fail harmlessly if applied twice.
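--
-- Once created, items can be queued and inspected, e.g. (hypothetical
-- payload shown for illustration):
--
--   INSERT INTO public.queue_items (payload) VALUES ('{"file": "example.csv"}'::jsonb);
--   SELECT item_id, status, created_on FROM public.queue_items WHERE status = 'queued';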
-- Queues are global, not per project,
-- so they go in the `public` schema.
CREATE TYPE queue_item_status
AS ENUM (
'queued',
'cancelled',
'failed',
'sent'
);
CREATE TABLE IF NOT EXISTS queue_items (
item_id serial NOT NULL PRIMARY KEY,
-- One day we may want multiple queues, in that case we will
-- have a queue_id and a relation of queue definitions.
-- But not right now.
-- queue_id integer NOT NULL REFERENCES queues (queue_id),
status queue_item_status NOT NULL DEFAULT 'queued',
payload jsonb NOT NULL,
results jsonb NOT NULL DEFAULT '{}'::jsonb,
created_on timestamptz NOT NULL DEFAULT current_timestamp,
updated_on timestamptz NOT NULL DEFAULT current_timestamp,
not_before timestamptz NOT NULL DEFAULT '1970-01-01T00:00:00Z',
parent_id integer NULL REFERENCES queue_items (item_id)
);
-- Sets `updated_on` to current_timestamp unless an explicit
-- timestamp is part of the update.
--
-- This function can be reused with any table that has (or could have)
-- an `updated_on` column of type timestamptz.
CREATE OR REPLACE FUNCTION update_timestamp () RETURNS trigger AS
$$
BEGIN
IF NEW.updated_on IS NOT NULL THEN
NEW.updated_on := current_timestamp;
END IF;
RETURN NEW;
EXCEPTION
WHEN undefined_column THEN RETURN NEW;
END;
$$
LANGUAGE plpgsql;
CREATE TRIGGER queue_items_tg0
BEFORE INSERT OR UPDATE ON public.queue_items
FOR EACH ROW EXECUTE FUNCTION public.update_timestamp();
CREATE TRIGGER queue_items_tg1
AFTER INSERT OR DELETE OR UPDATE ON public.queue_items
FOR EACH ROW EXECUTE FUNCTION public.notify('queue_items');

View File

@@ -1,24 +0,0 @@
-- Upgrade the database from commit 74b3de5c to commit 83be83e4.
--
-- NOTE: This upgrade only affects the `public` schema.
--
-- This inserts a database schema version into the database.
-- Note that we are not otherwise changing the schema, so older
-- server code will continue to run against this version.
--
-- ATTENTION!
--
-- This value should be incremented every time that the database
-- schema changes (either `public` or any of the survey schemas)
-- and is used by the server at start-up to detect if it is
-- running against a compatible schema version.
--
-- To apply, run as the dougal user:
--
-- psql < $THIS_FILE
--
-- NOTE: It can be applied multiple times without ill effect.
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.1.0"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.1.0"}' WHERE public.info.key = 'version';
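--
-- A quick way to verify the recorded version afterwards (illustrative,
-- not part of the upgrade itself):
--
--   SELECT value->>'db_schema' FROM public.info WHERE key = 'version';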

View File

@@ -1,84 +0,0 @@
-- Upgrade the database from commit 83be83e4 to 53ed096e.
--
-- New schema version: 0.2.0
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This migrates the file hashes to address issue #173.
-- The new hashes use size, modification time, creation time and the
-- first half of the MD5 hex digest of the file's absolute path.
--
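-- For illustration (the field layout is assumed from the UPDATE statement
-- below, which only rewrites the last, colon-separated component):
--
--   <size>:<mtime>:<ctime>:<old component>  becomes  <size>:<mtime>:<ctime>:<left(md5(path), 16)>
--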
-- It's a minor (rather than patch) version number increment because the
-- accompanying changes to `bin/datastore.py` mean that the existing data
-- is no longer compatible with the new hashing function.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can take a while if run on a large database.
-- NOTE: It can be applied multiple times without ill effect.
BEGIN;
CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE migrate_hashes (schema_name text) AS $$
BEGIN
RAISE NOTICE 'Migrating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
EXECUTE format('UPDATE %I.files SET hash = array_to_string(array_append(trim_array(string_to_array(hash, '':''), 1), left(md5(path), 16)), '':'')', schema_name);
EXECUTE 'SET search_path TO public'; -- Back to the default search path for good measure
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE upgrade_10 () AS $$
DECLARE
row RECORD;
BEGIN
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL migrate_hashes(row.schema_name);
END LOOP;
END;
$$ LANGUAGE plpgsql;
CALL upgrade_10();
CALL show_notice('Cleaning up');
DROP PROCEDURE migrate_hashes (schema_name text);
DROP PROCEDURE upgrade_10 ();
CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.2.0"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.2.0"}' WHERE public.info.key = 'version';
CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,189 +0,0 @@
-- Add function to retrieve sequence/shotpoint from timestamps and vice-versa
--
-- New schema version: 0.2.1
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects the public schema.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- Two new functions are defined:
--
-- sequence_shot_from_tstamp(tstamp, [tolerance]) → sequence, point, delta
--
-- Returns a sequence + shotpoint if one falls within `tolerance` seconds
-- of `tstamp`. The tolerance may be omitted in which case it defaults to
-- three seconds. If multiple values match, it returns the closest in time.
--
-- tstamp_from_sequence_shot(sequence, point) → tstamp
--
-- Returns a timestamp given a sequence and point number.
--
-- NOTE: This last function must be called from a search path including a
-- project schema, as it accesses the raw_shots table.
--
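-- Example calls once applied (illustrative values; `survey_1` is an
-- assumed project schema):
--
--   SELECT * FROM public.sequence_shot_from_tstamp('2021-06-01T12:00:00Z'::timestamptz);
--   SELECT * FROM public.sequence_shot_from_tstamp('2021-06-01T12:00:00Z'::timestamptz, 10);
--   SET search_path TO survey_1,public;
--   SELECT tstamp_from_sequence_shot(1234, 1001);
--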
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can take a while if run on a large database.
-- NOTE: It can be applied multiple times without ill effect.
-- NOTE: This will lock the database while the transaction is active.
--
-- NOTE: This is a patch version change so it does not require a
-- backend restart.
BEGIN;
CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE OR REPLACE FUNCTION tstamp_from_sequence_shot(
IN s numeric,
IN p numeric,
OUT "ts" timestamptz)
AS $inner$
SELECT tstamp FROM raw_shots WHERE sequence = s AND point = p LIMIT 1;
$inner$ LANGUAGE SQL;
COMMENT ON FUNCTION tstamp_from_sequence_shot(numeric, numeric)
IS 'Get the timestamp of an existing shotpoint.';
CREATE OR REPLACE FUNCTION tstamp_interpolate(s numeric, p numeric) RETURNS timestamptz
AS $inner$
DECLARE
ts0 timestamptz;
ts1 timestamptz;
pt0 numeric;
pt1 numeric;
BEGIN
SELECT tstamp, point
INTO ts0, pt0
FROM raw_shots
WHERE sequence = s AND point < p
ORDER BY point DESC LIMIT 1;
SELECT tstamp, point
INTO ts1, pt1
FROM raw_shots
WHERE sequence = s AND point > p
ORDER BY point ASC LIMIT 1;
RETURN (ts1-ts0)/abs(pt1-pt0)*abs(p-pt0)+ts0;
END;
$inner$ LANGUAGE PLPGSQL;
COMMENT ON FUNCTION tstamp_interpolate(numeric, numeric)
IS 'Interpolate a timestamp given sequence and point values.
It will try to find the points immediately before and after in the sequence and interpolate into the gap, which may consist of multiple missed shots.
If called on an existing shotpoint it will return an interpolated timestamp as if the shotpoint did not exist, as opposed to returning its actual timestamp.
Returns NULL if it is not possible to interpolate.';
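-- Worked example (values assumed): with existing shots at point 100
-- recorded at 12:00:00 and point 104 recorded at 12:00:40, interpolating
-- point 101 yields (40 s / 4) * 1 + 12:00:00 = 12:00:10.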
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_database () AS $$
DECLARE
row RECORD;
BEGIN
CREATE OR REPLACE FUNCTION public.sequence_shot_from_tstamp(
IN ts timestamptz,
IN tolerance numeric,
OUT "sequence" numeric,
OUT "point" numeric,
OUT "delta" numeric)
AS $inner$
SELECT
(meta->>'_sequence')::numeric AS sequence,
(meta->>'_point')::numeric AS point,
extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts ) AS delta
FROM real_time_inputs
WHERE
meta ? '_sequence' AND
abs(extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts )) < tolerance
ORDER BY abs(extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts ))
LIMIT 1;
$inner$ LANGUAGE SQL;
COMMENT ON FUNCTION public.sequence_shot_from_tstamp(timestamptz, numeric)
IS 'Get sequence and shotpoint from timestamp.
Given a timestamp this function returns the closest shot to it within the given tolerance value.
This uses the `real_time_inputs` table and it does not give an indication of which project the shotpoint belongs to. It is assumed that a single project is being acquired at a given time.';
CREATE OR REPLACE FUNCTION public.sequence_shot_from_tstamp(
IN ts timestamptz,
OUT "sequence" numeric,
OUT "point" numeric,
OUT "delta" numeric)
AS $inner$
SELECT * FROM public.sequence_shot_from_tstamp(ts, 3);
$inner$ LANGUAGE SQL;
COMMENT ON FUNCTION public.sequence_shot_from_tstamp(timestamptz)
IS 'Get sequence and shotpoint from timestamp.
Overloaded form in which the tolerance value is implied and defaults to three seconds.';
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$$ LANGUAGE plpgsql;
CALL pg_temp.upgrade_database();
CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade_database ();
CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.2.1"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.2.1"}' WHERE public.info.key = 'version';
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,360 +0,0 @@
-- Add new event log schema.
--
-- New schema version: 0.2.2
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
-- REQUIRES POSTGRESQL VERSION 14 OR NEWER
-- (Because of CREATE OR REPLACE TRIGGER)
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This is a redesign of the event logging mechanism. The old mechanism
-- relied on a distinction between sequence events (i.e., those which can
-- be associated to a shotpoint within a sequence), timed events (those
-- which occur outside any acquisition sequence) and so-called virtual
-- events (deduced from the data). It was inflexible and inefficient,
-- as most of the time we needed to merge those two types of events into
-- a single view.
--
-- The new mechanism:
-- - uses a single table
-- - accepts sequence event entries for shots or sequences which may not (yet)
-- exist. (https://gitlab.com/wgp/dougal/software/-/issues/170)
-- - keeps edit history (https://gitlab.com/wgp/dougal/software/-/issues/138)
-- - Keeps track of when an entry was made or subsequently edited.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can take a while if run on a large database.
-- NOTE: It can be applied multiple times without ill effect, as long
-- as the new tables did not previously exist. If they did, they will
-- be emptied before migrating the data.
--
-- WARNING: Applying this upgrade migrates the old event data. It does
-- NOT yet drop the old tables, which is handled in a separate script,
-- leaving the actions here technically reversible without having to
-- restore from backup.
BEGIN;
CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE SEQUENCE IF NOT EXISTS event_log_uid_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
CREATE TABLE IF NOT EXISTS event_log_full (
-- uid is a unique id for each entry in the table,
-- including revisions of an existing entry.
uid integer NOT NULL PRIMARY KEY DEFAULT nextval('event_log_uid_seq'),
-- All revisions of an entry share the same id.
-- If inserting a new entry, id = uid.
id integer NOT NULL,
-- No default tstamp because, for instance, a user could
-- enter a sequence/point event referring to the future.
-- An external process should scan those at regular intervals
-- and populate the tstamp as needed.
tstamp timestamptz NULL,
sequence integer NULL,
point integer NULL,
remarks text NOT NULL DEFAULT '',
labels text[] NOT NULL DEFAULT ARRAY[]::text[],
-- TODO: Need a geometry column? Let us check performance as it is
-- and, if needed, add a geometry column plus a spatial index later.
meta jsonb NOT NULL DEFAULT '{}'::jsonb,
validity tstzrange NOT NULL CHECK (NOT isempty(validity)),
-- We accept either:
-- - Just a tstamp
-- - Just a sequence / point pair
-- - All three
-- We don't accept:
-- - A sequence without a point or vice-versa
-- - Nothing being provided
CHECK (
(tstamp IS NOT NULL AND sequence IS NOT NULL AND point IS NOT NULL) OR
(tstamp IS NOT NULL AND sequence IS NULL AND point IS NULL) OR
(tstamp IS NULL AND sequence IS NOT NULL AND point IS NOT NULL)
)
);
CREATE INDEX IF NOT EXISTS event_log_id ON event_log_full USING btree (id);
CREATE OR REPLACE FUNCTION event_log_full_insert() RETURNS TRIGGER AS $inner$
BEGIN
NEW.id := COALESCE(NEW.id, NEW.uid);
NEW.validity := tstzrange(current_timestamp, NULL);
NEW.meta = COALESCE(NEW.meta, '{}'::jsonb);
NEW.labels = COALESCE(NEW.labels, ARRAY[]::text[]);
IF cardinality(NEW.labels) > 0 THEN
-- Remove duplicates
SELECT array_agg(DISTINCT elements)
INTO NEW.labels
FROM (SELECT unnest(NEW.labels) AS elements) AS labels;
END IF;
RETURN NEW;
END;
$inner$ LANGUAGE plpgsql;
CREATE OR REPLACE TRIGGER event_log_full_insert_tg
BEFORE INSERT ON event_log_full
FOR EACH ROW EXECUTE FUNCTION event_log_full_insert();
-- The public.notify() trigger to alert clients that something has changed
CREATE OR REPLACE TRIGGER event_log_full_notify_tg
AFTER INSERT OR DELETE OR UPDATE
ON event_log_full FOR EACH ROW EXECUTE FUNCTION public.notify('event');
--
-- VIEW event_log
--
-- This is what is exposed to the user most of the time.
-- It shows the current version of records in the event_log_full
-- table.
--
-- The user applies edits to this table directly, which are
-- processed via triggers.
--
CREATE OR REPLACE VIEW event_log AS
SELECT
id, tstamp, sequence, point, remarks, labels, meta,
uid <> id AS has_edits,
lower(validity) AS modified_on
FROM event_log_full
WHERE validity @> current_timestamp;
CREATE OR REPLACE FUNCTION event_log_update() RETURNS TRIGGER AS $inner$
BEGIN
IF (TG_OP = 'INSERT') THEN
-- Complete the tstamp if possible
IF NEW.sequence IS NOT NULL AND NEW.point IS NOT NULL AND NEW.tstamp IS NULL THEN
SELECT COALESCE(
tstamp_from_sequence_shot(NEW.sequence, NEW.point),
tstamp_interpolate(NEW.sequence, NEW.point)
)
INTO NEW.tstamp;
END IF;
-- Any id that is provided will be ignored. The generated
-- id will match uid.
INSERT INTO event_log_full
(tstamp, sequence, point, remarks, labels, meta)
VALUES (NEW.tstamp, NEW.sequence, NEW.point, NEW.remarks, NEW.labels, NEW.meta);
RETURN NEW;
ELSIF (TG_OP = 'UPDATE') THEN
-- Set end of validity and create a new entry with id
-- matching that of the old entry.
-- NOTE: Do not allow updating an event that has meta.readonly = true
IF EXISTS
(SELECT *
FROM event_log_full
WHERE id = OLD.id AND (meta->>'readonly')::boolean IS TRUE)
THEN
RAISE check_violation USING MESSAGE = 'Cannot modify read-only entry';
RETURN NULL;
END IF;
-- If the sequence / point has changed, and no new tstamp is provided, get one
IF NEW.sequence <> OLD.sequence OR NEW.point <> OLD.point
AND NEW.sequence IS NOT NULL AND NEW.point IS NOT NULL
AND NEW.tstamp IS NULL OR NEW.tstamp = OLD.tstamp THEN
SELECT COALESCE(
tstamp_from_sequence_shot(NEW.sequence, NEW.point),
tstamp_interpolate(NEW.sequence, NEW.point)
)
INTO NEW.tstamp;
END IF;
UPDATE event_log_full
SET validity = tstzrange(lower(validity), current_timestamp)
WHERE validity @> current_timestamp AND id = OLD.id;
-- Any attempt to modify id will be ignored.
INSERT INTO event_log_full
(id, tstamp, sequence, point, remarks, labels, meta)
VALUES (OLD.id, NEW.tstamp, NEW.sequence, NEW.point, NEW.remarks, NEW.labels, NEW.meta);
RETURN NEW;
ELSIF (TG_OP = 'DELETE') THEN
-- Set end of validity.
-- NOTE: We *do* allow deleting an event that has meta.readonly = true
-- This could be of interest if for instance we wanted to keep the history
-- of QC results for a point, provided that the QC routines write to
-- event_log and not event_log_full
UPDATE event_log_full
SET validity = tstzrange(lower(validity), current_timestamp)
WHERE validity @> current_timestamp AND id = OLD.id;
RETURN NULL;
END IF;
END;
$inner$ LANGUAGE plpgsql;
CREATE OR REPLACE TRIGGER event_log_tg
INSTEAD OF INSERT OR UPDATE OR DELETE ON event_log
FOR EACH ROW EXECUTE FUNCTION event_log_update();
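--
-- Illustrative lifecycle (values made up): an INSERT through event_log
-- creates a row in event_log_full with id = uid; a later UPDATE closes
-- the validity range of that row and inserts a new revision with the same
-- id but a fresh uid, so event_log keeps showing one current row per id.
--
--   INSERT INTO event_log (sequence, point, remarks)
--   VALUES (1234, 1001, 'Gun 3 autofire');
--
--   UPDATE event_log SET remarks = 'Gun 3 autofire (confirmed)'
--   WHERE id = 1;  -- id assumed for the example
--
--   SELECT id, remarks, has_edits FROM event_log WHERE id = 1;
--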
-- NOTE
-- This is where we migrate the actual data
RAISE NOTICE 'Migrating schema %', schema_name;
-- We start by deleting any data that the new tables might
-- have had if they already existed.
DELETE FROM event_log_full;
-- We purposefully bypass event_log here, as the tables we're
-- migrating from only contain a single version of each event.
INSERT INTO event_log_full (tstamp, sequence, point, remarks, labels, meta)
SELECT
tstamp, sequence, point, remarks, labels,
meta || json_build_object('geometry', geometry, 'readonly', virtual)::jsonb
FROM events;
UPDATE event_log_full SET meta = meta - 'geometry' WHERE meta->>'geometry' IS NULL;
UPDATE event_log_full SET meta = meta - 'readonly' WHERE (meta->'readonly')::boolean IS false;
-- This function used the superseded `events` view.
-- We need to drop it because we're changing the return type.
DROP FUNCTION IF EXISTS label_in_sequence (_sequence integer, _label text);
CREATE OR REPLACE FUNCTION label_in_sequence (_sequence integer, _label text)
RETURNS event_log
LANGUAGE sql
AS $inner$
SELECT * FROM event_log WHERE sequence = _sequence AND _label = ANY(labels);
$inner$;
-- This procedure used the superseded `events` view (and rather convoluted logic).
CREATE OR REPLACE PROCEDURE handle_final_line_events (_seq integer, _label text, _column text)
LANGUAGE plpgsql
AS $inner$
DECLARE
_line final_lines_summary%ROWTYPE;
_column_value integer;
_tg_name text := 'final_line';
_event event_log%ROWTYPE;
event_id integer;
BEGIN
SELECT * INTO _line FROM final_lines_summary WHERE sequence = _seq;
_event := label_in_sequence(_seq, _label);
_column_value := row_to_json(_line)->>_column;
--RAISE NOTICE '% is %', _label, _event;
--RAISE NOTICE 'Line is %', _line;
--RAISE NOTICE '% is % (%)', _column, _column_value, _label;
IF _event IS NULL THEN
--RAISE NOTICE 'We will populate the event log from the sequence data';
INSERT INTO event_log (sequence, point, remarks, labels, meta)
VALUES (
-- The sequence
_seq,
-- The shotpoint
_column_value,
-- Remark. Something like "FSP <linename>"
format('%s %s', _label, (SELECT meta->>'lineName' FROM final_lines WHERE sequence = _seq)),
-- Label
ARRAY[_label],
-- Meta. Something like {"auto" : {"FSP" : "final_line"}}
json_build_object('auto', json_build_object(_label, _tg_name))
);
ELSE
--RAISE NOTICE 'We may populate the sequence meta from the event log';
--RAISE NOTICE 'Unless the event log was populated by us previously';
--RAISE NOTICE 'Populated by us previously? %', _event.meta->'auto'->>_label = _tg_name;
IF _event.meta->'auto'->>_label IS DISTINCT FROM _tg_name THEN
--RAISE NOTICE 'Adding % found in events log to final_line meta', _label;
UPDATE final_lines
SET meta = jsonb_set(meta, ARRAY[_label], to_jsonb(_event.point))
WHERE sequence = _seq;
END IF;
END IF;
END;
$inner$;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_12 () AS $$
DECLARE
row RECORD;
BEGIN
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$$ LANGUAGE plpgsql;
CALL pg_temp.upgrade_12();
CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade_12 ();
CALL show_notice('Updating db_schema version');
-- This is technically still compatible with 0.2.0 as we are only adding
-- some more tables and views but not yet dropping the old ones, which we
-- will do separately so that these scripts do not get too big.
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.2.2"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.2.2"}' WHERE public.info.key = 'version';
CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,98 +0,0 @@
-- Migrate events to new schema
--
-- New schema version: 0.3.0
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This migrates the data from the old event log tables to the new schema.
-- It is a *very* good idea to review the data manually after the migration
-- as issues with the logs that had gone unnoticed may become evident now.
--
-- WARNING: If data exists in the new event tables, IT WILL BE TRUNCATED.
--
-- Other than that, this migration is fairly benign as it does not modify
-- the old data.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can take a while if run on a large database.
-- NOTE: It can be applied multiple times without ill effect.
-- NOTE: This will lock the new event tables while the transaction is active.
--
-- WARNING: This is a minor (not patch) version change, meaning that it requires
-- an upgrade and restart of the backend server.
BEGIN;
CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
TRUNCATE event_log_full;
-- NOTE: meta->>'readonly' = TRUE (populated from the old `virtual` flag) means that
-- the event was created algorithmically and should not be user editable.
INSERT INTO event_log_full (tstamp, sequence, point, remarks, labels, meta)
SELECT
tstamp, sequence, point, remarks, labels,
meta || json_build_object('geometry', geometry, 'readonly', virtual)::jsonb
FROM events;
-- We purposefully bypass event_log here
UPDATE event_log_full SET meta = meta - 'geometry' WHERE meta->>'geometry' IS NULL;
UPDATE event_log_full SET meta = meta - 'readonly' WHERE (meta->'readonly')::boolean IS false;
END
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_database () AS $$
DECLARE
row RECORD;
BEGIN
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$$ LANGUAGE plpgsql;
CALL pg_temp.upgrade_database();
CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade_database ();
CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.0"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.0"}' WHERE public.info.key = 'version';
CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,99 +0,0 @@
-- Drop old event tables.
--
-- New schema version: 0.3.1
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This completes the migration from the old event logging mechanism by
-- DROPPING THE OLD DATABASE OBJECTS, MAKING THE MIGRATION IRREVERSIBLE,
-- other than by restoring from backup and manually transferring any new
-- data that may have been created in the meanwhile.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can take a while if run on a large database.
-- NOTE: It can be applied multiple times without ill effect.
-- NOTE: This will lock the database while the transaction is active.
--
-- WARNING: Applying this upgrade drops the old tables. Ensure that you
-- have migrated the data first.
--
-- NOTE: This is a patch version change so it does not require a
-- backend restart.
BEGIN;
CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
DROP FUNCTION IF EXISTS
label_in_sequence(integer,text), reset_events_serials();
DROP VIEW IF EXISTS
events_midnight_shot, events_seq_timed, events_labels, "events";
DROP TABLE IF EXISTS
events_seq_labels, events_timed_labels, events_timed_seq, events_seq, events_timed;
DROP SEQUENCE IF EXISTS
events_seq_id_seq, events_timed_id_seq;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_database () AS $$
DECLARE
row RECORD;
BEGIN
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$$ LANGUAGE plpgsql;
CALL pg_temp.upgrade_database();
CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade_database ();
CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.1"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.1"}' WHERE public.info.key = 'version';
CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,136 +0,0 @@
-- Fix project_summary view.
--
-- New schema version: 0.3.2
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This fixes a problem with the project_summary view. In its common table
-- expression, the view definition tried to search public.projects based on
-- the search path value with the following expression:
--
-- (current_setting('search_path'::text) ~~ (p.schema || '%'::text))
--
-- That is of course bound to fail as soon as the schema goes above `survey_9`
-- because `survey_10 LIKE ('survey_1' || '%')` is TRUE.
--
-- The new mechanism relies on splitting the search_path.
--
-- NOTE: The survey schema needs to be the leftmost element in search_path.
--
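-- A minimal illustration of the difference (schema names assumed):
--
--   SELECT 'survey_10,public' LIKE 'survey_1' || '%';            -- true (wrong match)
--   SELECT split_part('survey_10,public', ',', 1) = 'survey_1';  -- false (correct)
--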
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
BEGIN;
CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE OR REPLACE VIEW project_summary AS
WITH fls AS (
SELECT avg((final_lines_summary.duration / ((final_lines_summary.num_points - 1))::double precision)) AS shooting_rate,
avg((final_lines_summary.length / date_part('epoch'::text, final_lines_summary.duration))) AS speed,
sum(final_lines_summary.duration) AS prod_duration,
sum(final_lines_summary.length) AS prod_distance
FROM final_lines_summary
), project AS (
SELECT p.pid,
p.name,
p.schema
FROM public.projects p
WHERE (split_part(current_setting('search_path'::text), ','::text, 1) = p.schema)
)
SELECT project.pid,
project.name,
project.schema,
( SELECT count(*) AS count
FROM preplot_lines
WHERE (preplot_lines.class = 'V'::bpchar)) AS lines,
ps.total,
ps.virgin,
ps.prime,
ps.other,
ps.ntba,
ps.remaining,
( SELECT to_json(fs.*) AS to_json
FROM final_shots fs
ORDER BY fs.tstamp
LIMIT 1) AS fsp,
( SELECT to_json(fs.*) AS to_json
FROM final_shots fs
ORDER BY fs.tstamp DESC
LIMIT 1) AS lsp,
( SELECT count(*) AS count
FROM raw_lines rl) AS seq_raw,
( SELECT count(*) AS count
FROM final_lines rl) AS seq_final,
fls.prod_duration,
fls.prod_distance,
fls.speed AS shooting_rate
FROM preplot_summary ps,
fls,
project;
ALTER TABLE project_summary OWNER TO postgres;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_15 () AS $$
DECLARE
row RECORD;
BEGIN
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$$ LANGUAGE plpgsql;
CALL pg_temp.upgrade_15();
CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade_15 ();
CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.2"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.2"}' WHERE public.info.key = 'version';
CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,169 +0,0 @@
-- Fix not being able to edit a time-based event.
--
-- New schema version: 0.3.3
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- The event_log_update() function that gets called when trying to update
-- the event_log view will not work if the caller does not provide a timestamp
-- or sequence + point in the list of fields to be updated. See:
-- https://gitlab.com/wgp/dougal/software/-/issues/198
--
-- This fixes the problem by liberally using COALESCE() to merge the OLD
-- and NEW records.
--
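-- For example (id value assumed), an update that only touches the remarks
-- of a time-based event should now succeed instead of tripping the
-- tstamp/sequence/point check constraint:
--
--   UPDATE event_log SET remarks = 'Rig move completed' WHERE id = 42;
--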
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
BEGIN;
CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE OR REPLACE FUNCTION event_log_update() RETURNS trigger
LANGUAGE plpgsql
AS $inner$
BEGIN
IF (TG_OP = 'INSERT') THEN
-- Complete the tstamp if possible
IF NEW.sequence IS NOT NULL AND NEW.point IS NOT NULL AND NEW.tstamp IS NULL THEN
SELECT COALESCE(
tstamp_from_sequence_shot(NEW.sequence, NEW.point),
tstamp_interpolate(NEW.sequence, NEW.point)
)
INTO NEW.tstamp;
END IF;
-- Any id that is provided will be ignored. The generated
-- id will match uid.
INSERT INTO event_log_full
(tstamp, sequence, point, remarks, labels, meta)
VALUES (NEW.tstamp, NEW.sequence, NEW.point, NEW.remarks, NEW.labels, NEW.meta);
RETURN NEW;
ELSIF (TG_OP = 'UPDATE') THEN
-- Set end of validity and create a new entry with id
-- matching that of the old entry.
-- NOTE: Do not allow updating an event that has meta.readonly = true
IF EXISTS
(SELECT *
FROM event_log_full
WHERE id = OLD.id AND (meta->>'readonly')::boolean IS TRUE)
THEN
RAISE check_violation USING MESSAGE = 'Cannot modify read-only entry';
RETURN NULL;
END IF;
-- If the sequence / point has changed, and no new tstamp is provided, get one
IF NEW.sequence <> OLD.sequence OR NEW.point <> OLD.point
AND NEW.sequence IS NOT NULL AND NEW.point IS NOT NULL
AND NEW.tstamp IS NULL OR NEW.tstamp = OLD.tstamp THEN
SELECT COALESCE(
tstamp_from_sequence_shot(NEW.sequence, NEW.point),
tstamp_interpolate(NEW.sequence, NEW.point)
)
INTO NEW.tstamp;
END IF;
UPDATE event_log_full
SET validity = tstzrange(lower(validity), current_timestamp)
WHERE validity @> current_timestamp AND id = OLD.id;
-- Any attempt to modify id will be ignored.
INSERT INTO event_log_full
(id, tstamp, sequence, point, remarks, labels, meta)
VALUES (
OLD.id,
COALESCE(NEW.tstamp, OLD.tstamp),
COALESCE(NEW.sequence, OLD.sequence),
COALESCE(NEW.point, OLD.point),
COALESCE(NEW.remarks, OLD.remarks),
COALESCE(NEW.labels, OLD.labels),
COALESCE(NEW.meta, OLD.meta)
);
RETURN NEW;
ELSIF (TG_OP = 'DELETE') THEN
-- Set end of validity.
-- NOTE: We *do* allow deleting an event that has meta.readonly = true
-- This could be of interest if for instance we wanted to keep the history
-- of QC results for a point, provided that the QC routines write to
-- event_log and not event_log_full
UPDATE event_log_full
SET validity = tstzrange(lower(validity), current_timestamp)
WHERE validity @> current_timestamp AND id = OLD.id;
RETURN NULL;
END IF;
END;
$inner$;
CREATE OR REPLACE TRIGGER event_log_tg INSTEAD OF INSERT OR DELETE OR UPDATE ON event_log FOR EACH ROW EXECUTE FUNCTION event_log_update();
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_16 () AS $$
DECLARE
row RECORD;
BEGIN
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$$ LANGUAGE plpgsql;
CALL pg_temp.upgrade_16();
CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade_16 ();
CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.3"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.3"}' WHERE public.info.key = 'version';
CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,163 +0,0 @@
-- Add augment_event_data() to populate missing event timestamps and geometries.
--
-- New schema version: 0.3.4
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This creates a new procedure augment_event_data() which tries to
-- populate missing event_log data, namely timestamps and geometries.
--
-- To do this it also adds a function public.geometry_from_tstamp()
-- which, given a timestamp, tries to fetch a geometry from real_time_inputs.
--
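-- Typical usage once applied (illustrative; `survey_1` is an assumed
-- project schema):
--
--   SET search_path TO survey_1,public;
--   CALL augment_event_data();
--   SELECT * FROM public.geometry_from_tstamp('2021-06-01T12:00:00Z'::timestamptz, 3);
--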
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
BEGIN;
CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE OR REPLACE PROCEDURE augment_event_data ()
LANGUAGE sql
AS $inner$
-- Populate the timestamp of sequence / point events
UPDATE event_log_full
SET tstamp = tstamp_from_sequence_shot(sequence, point)
WHERE
tstamp IS NULL AND sequence IS NOT NULL AND point IS NOT NULL;
-- Populate the geometry of sequence / point events for which
-- there is raw_shots data.
UPDATE event_log_full
SET meta = meta ||
jsonb_build_object(
'geometry',
(
SELECT st_transform(geometry, 4326)::jsonb
FROM raw_shots rs
WHERE rs.sequence = event_log_full.sequence AND rs.point = event_log_full.point
)
)
WHERE
sequence IS NOT NULL AND point IS NOT NULL AND
NOT meta ? 'geometry';
-- Populate the geometry of time-based events
UPDATE event_log_full e
SET
meta = meta || jsonb_build_object('geometry',
(SELECT st_transform(g.geometry, 4326)::jsonb
FROM geometry_from_tstamp(e.tstamp, 3) g))
WHERE
tstamp IS NOT NULL AND
sequence IS NULL AND point IS NULL AND
NOT meta ? 'geometry';
-- Get rid of null geometries
UPDATE event_log_full
SET
meta = meta - 'geometry'
WHERE
jsonb_typeof(meta->'geometry') = 'null';
-- Simplify the GeoJSON when the CRS is EPSG:4326
UPDATE event_log_full
SET
meta = meta #- '{geometry, crs}'
WHERE
meta->'geometry'->'crs'->'properties'->>'name' = 'EPSG:4326';
$inner$;
COMMENT ON PROCEDURE augment_event_data()
IS 'Populate missing timestamps and geometries in event_log_full';
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_17 () AS $$
DECLARE
row RECORD;
BEGIN
CALL show_notice('Adding index to real_time_inputs.meta->tstamp');
CREATE INDEX IF NOT EXISTS meta_tstamp_idx
ON public.real_time_inputs
USING btree ((meta->>'tstamp') DESC);
CALL show_notice('Creating function geometry_from_tstamp');
CREATE OR REPLACE FUNCTION public.geometry_from_tstamp(
IN ts timestamptz,
IN tolerance numeric,
OUT "geometry" geometry,
OUT "delta" numeric)
AS $inner$
SELECT
geometry,
extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts ) AS delta
FROM real_time_inputs
WHERE
geometry IS NOT NULL AND
abs(extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts )) < tolerance
ORDER BY abs(extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts ))
LIMIT 1;
$inner$ LANGUAGE SQL;
COMMENT ON FUNCTION public.geometry_from_tstamp(timestamptz, numeric)
IS 'Get geometry from timestamp';
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$$ LANGUAGE plpgsql;
CALL pg_temp.upgrade_17();
CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade_17 ();
CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.4"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.4"}' WHERE public.info.key = 'version';
CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,158 +0,0 @@
-- Restore the missing label_in_sequence() function in production schemas.
--
-- New schema version: 0.3.5
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- The function label_in_sequence(integer, text) was missing for the
-- production schemas. This patch (re-)defines the function as well
-- as other functions that depend on it (otherwise the new definition
-- does not get picked up).
--
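-- Example call once applied (sequence number and label assumed, with a
-- project schema on the search path):
--
--   SELECT * FROM label_in_sequence(1234, 'FSP');
--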
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
BEGIN;
CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE OR REPLACE FUNCTION label_in_sequence(_sequence integer, _label text) RETURNS event_log
LANGUAGE sql
AS $inner$
SELECT * FROM event_log WHERE sequence = _sequence AND _label = ANY(labels);
$inner$;
-- We need to redefine the functions / procedures that call label_in_sequence
CREATE OR REPLACE PROCEDURE handle_final_line_events(IN _seq integer, IN _label text, IN _column text)
LANGUAGE plpgsql
AS $inner$
DECLARE
_line final_lines_summary%ROWTYPE;
_column_value integer;
_tg_name text := 'final_line';
_event event_log%ROWTYPE;
event_id integer;
BEGIN
SELECT * INTO _line FROM final_lines_summary WHERE sequence = _seq;
_event := label_in_sequence(_seq, _label);
_column_value := row_to_json(_line)->>_column;
--RAISE NOTICE '% is %', _label, _event;
--RAISE NOTICE 'Line is %', _line;
--RAISE NOTICE '% is % (%)', _column, _column_value, _label;
IF _event IS NULL THEN
--RAISE NOTICE 'We will populate the event log from the sequence data';
INSERT INTO event_log (sequence, point, remarks, labels, meta)
VALUES (
-- The sequence
_seq,
-- The shotpoint
_column_value,
-- Remark. Something like "FSP <linename>"
format('%s %s', _label, (SELECT meta->>'lineName' FROM final_lines WHERE sequence = _seq)),
-- Label
ARRAY[_label],
-- Meta. Something like {"auto" : {"FSP" : "final_line"}}
json_build_object('auto', json_build_object(_label, _tg_name))
);
ELSE
--RAISE NOTICE 'We may populate the sequence meta from the event log';
--RAISE NOTICE 'Unless the event log was populated by us previously';
--RAISE NOTICE 'Populated by us previously? %', _event.meta->'auto'->>_label = _tg_name;
IF _event.meta->'auto'->>_label IS DISTINCT FROM _tg_name THEN
--RAISE NOTICE 'Adding % found in events log to final_line meta', _label;
UPDATE final_lines
SET meta = jsonb_set(meta, ARRAY[_label], to_jsonb(_event.point))
WHERE sequence = _seq;
END IF;
END IF;
END;
$inner$;
CREATE OR REPLACE PROCEDURE final_line_post_import(IN _seq integer)
LANGUAGE plpgsql
AS $inner$
BEGIN
CALL handle_final_line_events(_seq, 'FSP', 'fsp');
CALL handle_final_line_events(_seq, 'FGSP', 'fsp');
CALL handle_final_line_events(_seq, 'LGSP', 'lsp');
CALL handle_final_line_events(_seq, 'LSP', 'lsp');
END;
$inner$;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_18 () AS $$
DECLARE
row RECORD;
BEGIN
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$$ LANGUAGE plpgsql;
CALL pg_temp.upgrade_18();
CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade_18 ();
CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.5"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.5"}' WHERE public.info.key = 'version';
CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,162 +0,0 @@
-- Optimise geometry_from_tstamp() (issue #241).
--
-- New schema version: 0.3.6
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This optimises geometry_from_tstamp() by many orders of magnitude
-- (issue #241). The redefinition of geometry_from_tstamp() necessitates
-- redefining dependent functions.
--
-- We also drop the index on real_time_inputs.meta->'tstamp' as it is no
-- longer used.
--
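-- The gain comes from replacing an expression over meta->>'tstamp' with a
-- range predicate on the tstamp column (sketch of the WHERE clause change):
--
--   before: abs(extract('epoch' FROM (meta->>'tstamp')::timestamptz - ts)) < tolerance
--   after:  tstamp BETWEEN ts - tolerance * interval '1 second'
--                      AND ts + tolerance * interval '1 second'
--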
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
BEGIN;
CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE OR REPLACE PROCEDURE augment_event_data ()
LANGUAGE sql
AS $inner$
-- Populate the timestamp of sequence / point events
UPDATE event_log_full
SET tstamp = tstamp_from_sequence_shot(sequence, point)
WHERE
tstamp IS NULL AND sequence IS NOT NULL AND point IS NOT NULL;
-- Populate the geometry of sequence / point events for which
-- there is raw_shots data.
UPDATE event_log_full
SET meta = meta ||
jsonb_build_object(
'geometry',
(
SELECT st_transform(geometry, 4326)::jsonb
FROM raw_shots rs
WHERE rs.sequence = event_log_full.sequence AND rs.point = event_log_full.point
)
)
WHERE
sequence IS NOT NULL AND point IS NOT NULL AND
NOT meta ? 'geometry';
-- Populate the geometry of time-based events
UPDATE event_log_full e
SET
meta = meta || jsonb_build_object('geometry',
(SELECT st_transform(g.geometry, 4326)::jsonb
FROM geometry_from_tstamp(e.tstamp, 3) g))
WHERE
tstamp IS NOT NULL AND
sequence IS NULL AND point IS NULL AND
NOT meta ? 'geometry';
-- Get rid of null geometries
UPDATE event_log_full
SET
meta = meta - 'geometry'
WHERE
jsonb_typeof(meta->'geometry') = 'null';
-- Simplify the GeoJSON when the CRS is EPSG:4326
UPDATE event_log_full
SET
meta = meta #- '{geometry, crs}'
WHERE
meta->'geometry'->'crs'->'properties'->>'name' = 'EPSG:4326';
$inner$;
COMMENT ON PROCEDURE augment_event_data()
IS 'Populate missing timestamps and geometries in event_log_full';
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
BEGIN
CALL show_notice('Dropping index from real_time_inputs.meta->tstamp');
DROP INDEX IF EXISTS meta_tstamp_idx;
CALL show_notice('Creating function geometry_from_tstamp');
CREATE OR REPLACE FUNCTION public.geometry_from_tstamp(
IN ts timestamptz,
IN tolerance numeric,
OUT "geometry" geometry,
OUT "delta" numeric)
AS $inner$
SELECT
geometry,
extract('epoch' FROM tstamp - ts ) AS delta
FROM real_time_inputs
WHERE
geometry IS NOT NULL AND
tstamp BETWEEN (ts - tolerance * interval '1 second') AND (ts + tolerance * interval '1 second')
ORDER BY abs(extract('epoch' FROM tstamp - ts ))
LIMIT 1;
$inner$ LANGUAGE SQL;
COMMENT ON FUNCTION public.geometry_from_tstamp(timestamptz, numeric)
IS 'Get geometry from timestamp';
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();
CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.6"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.6"}' WHERE public.info.key = 'version';
CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,254 +0,0 @@
-- Update adjust_planner() for the new event log schema.
--
-- New schema version: 0.3.7
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This updates the adjust_planner() procedure to take into account the
-- new events schema (the `event` view has been replaced by `event_log`).
--
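-- A manual invocation for testing might look like this (illustrative;
-- `survey_1` is an assumed project schema):
--
--   SET search_path TO survey_1,public;
--   CALL adjust_planner();
--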
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CALL pg_temp.show_notice('Replacing adjust_planner() procedure');
CREATE OR REPLACE PROCEDURE adjust_planner()
LANGUAGE plpgsql
AS $$
DECLARE
_planner_config jsonb;
_planned_line planned_lines%ROWTYPE;
_lag interval;
_last_sequence sequences_summary%ROWTYPE;
_deltatime interval;
_shotinterval interval;
_tstamp timestamptz;
_incr integer;
BEGIN
SET CONSTRAINTS planned_lines_pkey DEFERRED;
SELECT data->'planner'
INTO _planner_config
FROM file_data
WHERE data ? 'planner';
SELECT *
INTO _last_sequence
FROM sequences_summary
ORDER BY sequence DESC
LIMIT 1;
SELECT *
INTO _planned_line
FROM planned_lines
WHERE sequence = _last_sequence.sequence AND line = _last_sequence.line;
SELECT
COALESCE(
((lead(ts0) OVER (ORDER BY sequence)) - ts1),
make_interval(mins => (_planner_config->>'defaultLineChangeDuration')::integer)
)
INTO _lag
FROM planned_lines
WHERE sequence = _last_sequence.sequence AND line = _last_sequence.line;
_incr = sign(_last_sequence.lsp - _last_sequence.fsp);
RAISE NOTICE '_planner_config: %', _planner_config;
RAISE NOTICE '_last_sequence: %', _last_sequence;
RAISE NOTICE '_planned_line: %', _planned_line;
RAISE NOTICE '_incr: %', _incr;
-- Does the latest sequence match a planned sequence?
IF _planned_line IS NULL THEN -- No it doesn't
RAISE NOTICE 'Latest sequence shot does not match a planned sequence';
SELECT * INTO _planned_line FROM planned_lines ORDER BY sequence ASC LIMIT 1;
RAISE NOTICE '_planned_line: %', _planned_line;
IF _planned_line.sequence <= _last_sequence.sequence THEN
RAISE NOTICE 'Renumbering the planned sequences starting from %', _planned_line.sequence + 1;
-- Renumber the planned sequences starting from last shot sequence number + 1
UPDATE planned_lines
SET sequence = sequence + _last_sequence.sequence - _planned_line.sequence + 1;
END IF;
-- The correction to make to the first planned line's ts0 will be based on either the last
-- sequence's EOL + default line change time or the current time, whichever is later.
_deltatime := GREATEST(COALESCE(_last_sequence.ts1_final, _last_sequence.ts1) + make_interval(mins => (_planner_config->>'defaultLineChangeDuration')::integer), current_timestamp) - _planned_line.ts0;
-- Is the first of the planned lines start time in the past? (±5 mins)
IF _planned_line.ts0 < (current_timestamp - make_interval(mins => 5)) THEN
RAISE NOTICE 'First planned line is in the past. Adjusting times by %', _deltatime;
-- Adjust the start / end time of the planned lines by assuming that we are at
-- `defaultLineChangeDuration` minutes away from SOL of the first planned line.
UPDATE planned_lines
SET
ts0 = ts0 + _deltatime,
ts1 = ts1 + _deltatime;
END IF;
ELSE -- Yes it does
RAISE NOTICE 'Latest sequence does match a planned sequence: %, %', _planned_line.sequence, _planned_line.line;
-- Is it online?
IF EXISTS(SELECT 1 FROM raw_lines_files WHERE sequence = _last_sequence.sequence AND hash = '*online*') THEN
-- Yes it is
RAISE NOTICE 'Sequence % is online', _last_sequence.sequence;
-- Let us get the SOL from the events log if we can
RAISE NOTICE 'Trying to set fsp, ts0 from events log FSP, FGSP';
WITH e AS (
SELECT * FROM event_log
WHERE
sequence = _last_sequence.sequence
AND ('FSP' = ANY(labels) OR 'FGSP' = ANY(labels))
ORDER BY tstamp LIMIT 1
)
UPDATE planned_lines
SET
fsp = COALESCE(e.point, fsp),
ts0 = COALESCE(e.tstamp, ts0)
FROM e
WHERE planned_lines.sequence = _last_sequence.sequence;
-- Shot interval
_shotinterval := (_last_sequence.ts1 - _last_sequence.ts0) / abs(_last_sequence.lsp - _last_sequence.fsp);
RAISE NOTICE 'Estimating EOL from current shot interval: %', _shotinterval;
SELECT (abs(lsp-fsp) * _shotinterval + ts0) - ts1
INTO _deltatime
FROM planned_lines
WHERE sequence = _last_sequence.sequence;
---- Set ts1 for the current sequence
--UPDATE planned_lines
--SET
--ts1 = (abs(lsp-fsp) * _shotinterval) + ts0
--WHERE sequence = _last_sequence.sequence;
RAISE NOTICE 'Adjustment is %', _deltatime;
IF abs(EXTRACT(EPOCH FROM _deltatime)) < 8 THEN
RAISE NOTICE 'Adjustment too small (< 8 s), so not applying it';
RETURN;
END IF;
-- Adjust ts1 for the current sequence
UPDATE planned_lines
SET ts1 = ts1 + _deltatime
WHERE sequence = _last_sequence.sequence;
-- Now shift all sequences after
UPDATE planned_lines
SET ts0 = ts0 + _deltatime, ts1 = ts1 + _deltatime
WHERE sequence > _last_sequence.sequence;
RAISE NOTICE 'Deleting planned sequences before %', _planned_line.sequence;
-- Remove all previous planner entries.
DELETE
FROM planned_lines
WHERE sequence < _last_sequence.sequence;
ELSE
-- No it isn't
RAISE NOTICE 'Sequence % is offline', _last_sequence.sequence;
-- We were supposed to finish at _planned_line.ts1 but we finished at:
_tstamp := GREATEST(COALESCE(_last_sequence.ts1_final, _last_sequence.ts1), current_timestamp);
-- WARNING Next line is for testing only
--_tstamp := COALESCE(_last_sequence.ts1_final, _last_sequence.ts1);
-- So we need to adjust timestamps by:
_deltatime := _tstamp - _planned_line.ts1;
RAISE NOTICE 'Planned end: %, actual end: % (%, %)', _planned_line.ts1, _tstamp, _planned_line.sequence, _last_sequence.sequence;
RAISE NOTICE 'Shifting times by % for sequences > %', _deltatime, _planned_line.sequence;
-- NOTE: This won't work if sequences are not, err… sequential.
-- NOTE: This has been known to happen in 2020.
UPDATE planned_lines
SET
ts0 = ts0 + _deltatime,
ts1 = ts1 + _deltatime
WHERE sequence > _planned_line.sequence;
RAISE NOTICE 'Deleting planned sequences up to %', _planned_line.sequence;
-- Remove all previous planner entries.
DELETE
FROM planned_lines
WHERE sequence <= _last_sequence.sequence;
END IF;
END IF;
END;
$$;
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
BEGIN
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.7"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.7"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,267 +0,0 @@
-- Add event_position() and event_meta() helper functions.
--
-- New schema version: 0.3.8
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This adds event_position() and event_meta() functions which are used
-- to retrieve position or metadata, respectively, given either a timestamp
-- or a sequence / point pair. Intended to be used in the context of #229.
--
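-- Example calls once applied (values assumed; the overloads used here are
-- defined further down in this script):
--
--   SET search_path TO survey_1,public;  -- assumed project schema
--   SELECT event_position(1234, 1001);
--   SELECT event_position('2021-06-01T12:00:00Z'::timestamptz);
--   SELECT event_meta('2021-06-01T12:00:00Z'::timestamptz);
--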
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
BEGIN;
CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
--
-- event_position(): Fetch event position
--
CREATE OR REPLACE FUNCTION event_position (
tstamp timestamptz, sequence integer, point integer, tolerance numeric
)
RETURNS geometry
AS $$
DECLARE
position geometry;
BEGIN
-- Try and get position by sequence / point first
IF sequence IS NOT NULL AND point IS NOT NULL THEN
-- Try and get the position from final_shots or raw_shots
SELECT COALESCE(f.geometry, r.geometry) geometry
INTO position
FROM raw_shots r LEFT JOIN final_shots f USING (sequence, point)
WHERE r.sequence = event_position.sequence AND r.point = event_position.point;
IF position IS NOT NULL THEN
RETURN position;
ELSIF tstamp IS NULL THEN
-- Get the timestamp for the sequence / point, if we can.
-- It will be used later in the function as we fall back
-- to timestamp based search.
-- We also adjust the tolerance as we're now dealing with
-- an exact timestamp.
SELECT COALESCE(f.tstamp, r.tstamp) tstamp, 0.002 tolerance
INTO tstamp, tolerance
FROM raw_shots r LEFT JOIN final_shots f USING (sequence, point)
WHERE r.sequence = event_position.sequence AND r.point = event_position.point;
END IF;
END IF;
-- If we got here, we better have a timestamp
-- First attempt: get a position from final_shots or raw_shots. This may
-- be redundant if we got here because we had a sequence / point without
-- a position, but never mind.
SELECT COALESCE(f.geometry, r.geometry) geometry
INTO position
FROM raw_shots r LEFT JOIN final_shots f USING (sequence, point)
WHERE r.tstamp = event_position.tstamp OR f.tstamp = event_position.tstamp
LIMIT 1; -- Just to be sure
IF position IS NULL THEN
-- Ok, so everything else so far has failed, let's try and get this
-- from real time data. We skip the search via sequence / point and
-- go directly for timestamp.
SELECT geometry
INTO position
FROM geometry_from_tstamp(tstamp, tolerance);
END IF;
RETURN position;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION event_position (timestamptz, integer, integer, numeric) IS
'Return the position associated with a sequence / point in the current project or
with a given timestamp. The timestamp is first searched for in the shot tables
of the current prospect or, if not found, in the real-time data.
Returns a geometry.';
CREATE OR REPLACE FUNCTION event_position (
tstamp timestamptz, sequence integer, point integer
)
RETURNS geometry
AS $$
BEGIN
RETURN event_position(tstamp, sequence, point, 3);
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION event_position (timestamptz, integer, integer) IS
'Overload of event_position with a default tolerance of three seconds.';
CREATE OR REPLACE FUNCTION event_position (
tstamp timestamptz
)
RETURNS geometry
AS $$
BEGIN
RETURN event_position(tstamp, NULL, NULL);
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION event_position (timestamptz) IS
'Overload of event_position (timestamptz, integer, integer) for use when searching by timestamp.';
CREATE OR REPLACE FUNCTION event_position (
sequence integer, point integer
)
RETURNS geometry
AS $$
BEGIN
RETURN event_position(NULL, sequence, point);
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION event_position (integer, integer) IS
'Overload of event_position (timestamptz, integer, integer) for use when searching by sequence / point.';
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
BEGIN
--
-- event_meta(): Fetch event metadata
--
CREATE OR REPLACE FUNCTION event_meta (
tstamp timestamptz, sequence integer, point integer
)
RETURNS jsonb
AS $$
DECLARE
result jsonb;
-- Tolerance is hard-coded, at least until a need to expose arises.
tolerance numeric;
BEGIN
tolerance := 3; -- seconds
-- We search by timestamp if we can, as that's a lot quicker
IF tstamp IS NOT NULL THEN
SELECT meta
INTO result
FROM real_time_inputs rti
WHERE
rti.tstamp BETWEEN (event_meta.tstamp - tolerance * interval '1 second') AND (event_meta.tstamp + tolerance * interval '1 second')
ORDER BY abs(extract('epoch' FROM rti.tstamp - event_meta.tstamp ))
LIMIT 1;
ELSE
SELECT meta
INTO result
FROM real_time_inputs rti
WHERE
(meta->>'_sequence')::integer = event_meta.sequence AND
(meta->>'_point')::integer = event_meta.point
ORDER BY rti.tstamp DESC
LIMIT 1;
END IF;
RETURN result;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION event_meta (timestamptz, integer, integer) IS
'Return the real-time event metadata associated with a sequence / point in the current project or
with a given timestamp. The timestamp is first searched for in the shot tables
of the current prospect or, if not found, in the real-time data.
Returns a JSONB object.';
CREATE OR REPLACE FUNCTION event_meta (
tstamp timestamptz
)
RETURNS jsonb
AS $$
BEGIN
RETURN event_meta(tstamp, NULL, NULL);
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION event_meta (timestamptz) IS
'Overload of event_meta (timestamptz, integer, integer) for use when searching by timestamp.';
CREATE OR REPLACE FUNCTION event_meta (
sequence integer, point integer
)
RETURNS jsonb
AS $$
BEGIN
RETURN event_meta(NULL, sequence, point);
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION event_meta (integer, integer) IS
'Overload of event_meta (timestamptz, integer, integer) for use when searching by sequence / point.';
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();
CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.8"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.8"}' WHERE public.info.key = 'version';
CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--


@@ -1,229 +0,0 @@
-- Fix not being able to edit a time-based event.
--
-- New schema version: 0.3.9
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This defines a replace_placeholders() function, taking as arguments
-- a text string and either a timestamp or a sequence / point pair. It
-- uses the latter arguments to find metadata from which it can extract
-- relevant information and replace it into the text string wherever the
-- appropriate placeholders appear. For instance, given a call such as
-- replace_placeholders('The position is @POS@', NULL, 11, 2600) it will
-- replace '@POS@' with the position of point 2600 in sequence 11, if it
-- exists (or leave the placeholder untouched otherwise).
--
-- A scan_placeholders() procedure is also defined, which calls the above
-- function on the entire event log.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
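-- As an illustrative sketch (not part of the upgrade), once this patch has
-- been applied the placeholders in the entire event log could be replaced
-- like this, assuming a hypothetical survey schema named survey_example:
--
--   SET search_path TO survey_example, public;
--   CALL scan_placeholders();
--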
BEGIN;
CREATE OR REPLACE PROCEDURE show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE OR REPLACE FUNCTION replace_placeholders (
text_in text, tstamp timestamptz, sequence integer, point integer
)
RETURNS text
AS $$
DECLARE
position geometry;
metadata jsonb;
text_out text;
json_query text;
json_result jsonb;
expect_recursion boolean := false;
BEGIN
text_out := text_in;
-- We only get a position if we are going to need it…
IF regexp_match(text_out, '@DMS@|@POS@|@DEG@') IS NOT NULL THEN
position := ST_Transform(event_position(tstamp, sequence, point), 4326);
END IF;
-- …and likewise with the metadata.
IF regexp_match(text_out, '@BSP@|@WD@|@CMG@|@EN@|@GRID@|@(\$\..*?)@@') IS NOT NULL THEN
metadata := event_meta(tstamp, sequence, point);
END IF;
-- We shortcut the evaluation if neither of the above regexps matched
IF position IS NULL AND metadata IS NULL THEN
RETURN text_out;
END IF;
IF position('@DMS@' IN text_out) != 0 THEN
text_out := replace(text_out, '@DMS@', ST_AsLatLonText(position));
END IF;
IF position('@POS@' IN text_out) != 0 THEN
text_out := replace(text_out, '@POS@', replace(ST_AsLatLonText(position, 'D.DDDDDD'), ' ', ', '));
END IF;
IF position('@DEG@' IN text_out) != 0 THEN
text_out := replace(text_out, '@DEG@', replace(ST_AsLatLonText(position, 'D.DDDDDD'), ' ', ', '));
END IF;
IF position('@EN@' IN text_out) != 0 THEN
IF metadata ? 'easting' AND metadata ? 'northing' THEN
text_out := replace(text_out, '@EN@', (metadata->>'easting') || ', ' || (metadata->>'northing'));
END IF;
END IF;
IF position('@GRID@' IN text_out) != 0 THEN
IF metadata ? 'easting' AND metadata ? 'northing' THEN
text_out := replace(text_out, '@GRID@', (metadata->>'easting') || ', ' || (metadata->>'northing'));
END IF;
END IF;
IF position('@CMG@' IN text_out) != 0 THEN
IF metadata ? 'bearing' THEN
text_out := replace(text_out, '@CMG@', metadata->>'bearing');
END IF;
END IF;
IF position('@BSP@' IN text_out) != 0 THEN
IF metadata ? 'speed' THEN
text_out := replace(text_out, '@BSP@', round((metadata->>'speed')::numeric * 3600 / 1852, 1)::text);
END IF;
END IF;
IF position('@WD@' IN text_out) != 0 THEN
IF metadata ? 'waterDepth' THEN
text_out := replace(text_out, '@WD@', metadata->>'waterDepth');
END IF;
END IF;
json_query := (regexp_match(text_out, '@(\$\..*?)@@'))[1];
IF json_query IS NOT NULL THEN
json_result := jsonb_path_query_array(metadata, json_query::jsonpath);
IF jsonb_array_length(json_result) = 1 THEN
text_out := replace(text_out, '@'||json_query||'@@', json_result->>0);
ELSE
text_out := replace(text_out, '@'||json_query||'@@', json_result::text);
END IF;
-- There might be multiple JSONPath queries, so we may have to recurse
expect_recursion := true;
END IF;
IF expect_recursion IS TRUE AND text_in != text_out THEN
--RAISE NOTICE 'Recursing %', text_out;
-- We don't know if we have found all the JSONPath expression
-- so we do another pass.
RETURN replace_placeholders(text_out, tstamp, sequence, point);
ELSE
RETURN text_out;
END IF;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION replace_placeholders (text, timestamptz, integer, integer) IS
'Replace certain placeholder strings in the input text with data obtained from shot or real-time data.';
CREATE OR REPLACE PROCEDURE scan_placeholders ()
LANGUAGE sql
AS $$
-- We update non read-only events via the event_log view to leave a trace
-- of the fact that placeholders were replaced (and when).
-- Note that this will not replace placeholders of old edits.
UPDATE event_log
SET remarks = replace_placeholders(remarks, tstamp, sequence, point)
FROM (
SELECT id
FROM event_log e
WHERE
(meta->'readonly')::boolean IS NOT TRUE AND (
regexp_match(remarks, '@DMS@|@POS@|@DEG@') IS NOT NULL OR
regexp_match(remarks, '@BSP@|@WD@|@CMG@|@EN@|@GRID@|@(\$\..*?)@@') IS NOT NULL
)
) t
WHERE event_log.id = t.id;
-- And then we update read-only events directly on the event_log_full table
-- (as of this version of the schema we're prevented from updating read-only
-- events via event_log anyway).
UPDATE event_log_full
SET remarks = replace_placeholders(remarks, tstamp, sequence, point)
FROM (
SELECT uid
FROM event_log_full e
WHERE
(meta->'readonly')::boolean IS TRUE AND (
regexp_match(remarks, '@DMS@|@POS@|@DEG@') IS NOT NULL OR
regexp_match(remarks, '@BSP@|@WD@|@CMG@|@EN@|@GRID@|@(\$\..*?)@@') IS NOT NULL
)
) t
WHERE event_log_full.uid = t.uid;
$$;
COMMENT ON PROCEDURE scan_placeholders () IS
'Run replace_placeholders() on the entire event log.';
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
BEGIN
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();
CALL show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.9"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.9"}' WHERE public.info.key = 'version';
CALL show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--


@@ -1,127 +0,0 @@
-- Fix not being able to edit a time-based event.
--
-- New schema version: 0.3.10
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects only the public schema.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This defines an interpolate_geometry_from_tstamp() function, taking a timestamp
-- and a maximum timespan in seconds. It will then interpolate a position
-- at the exact timestamp based on data from real_time_inputs, provided
-- that the effective interpolation timespan does not exceed the maximum
-- requested.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
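-- As an illustrative sketch (not part of the upgrade), the new function
-- could be called like this once applied; the timestamp and the 600 second
-- maximum timespan are made-up values:
--
--   SELECT public.interpolate_geometry_from_tstamp('2020-10-10 12:00:00+00', 600);
--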
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
BEGIN
CALL pg_temp.show_notice('Defining interpolate_geometry_from_tstamp()');
CREATE OR REPLACE FUNCTION public.interpolate_geometry_from_tstamp(
IN ts timestamptz,
IN maxspan numeric
)
RETURNS geometry
AS $$
DECLARE
ts0 timestamptz;
ts1 timestamptz;
geom0 geometry;
geom1 geometry;
span numeric;
fraction numeric;
BEGIN
SELECT tstamp, geometry
INTO ts0, geom0
FROM real_time_inputs
WHERE tstamp <= ts
ORDER BY tstamp DESC
LIMIT 1;
SELECT tstamp, geometry
INTO ts1, geom1
FROM real_time_inputs
WHERE tstamp >= ts
ORDER BY tstamp ASC
LIMIT 1;
IF geom0 IS NULL OR geom1 IS NULL THEN
RAISE NOTICE 'Interpolation failed (no straddling data)';
RETURN NULL;
END IF;
-- See if we got an exact match
IF ts0 = ts THEN
RETURN geom0;
ELSIF ts1 = ts THEN
RETURN geom1;
END IF;
span := extract('epoch' FROM ts1 - ts0);
IF span > maxspan THEN
RAISE NOTICE 'Interpolation timespan % outside maximum requested (%)', span, maxspan;
RETURN NULL;
END IF;
fraction := extract('epoch' FROM ts - ts0) / span;
IF fraction < 0 OR fraction > 1 THEN
RAISE NOTICE 'Requested timestamp % outside of interpolation span (fraction: %)', ts, fraction;
RETURN NULL;
END IF;
RETURN ST_LineInterpolatePoint(St_MakeLine(geom0, geom1), fraction);
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION public.interpolate_geometry_from_tstamp(timestamptz, numeric) IS
'Interpolate a position over a given maximum timespan (in seconds)
based on real-time inputs. Returns a POINT geometry.';
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.10"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.10"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--


@@ -1,149 +0,0 @@
-- Fix not being able to edit a time-based event.
--
-- New schema version: 0.3.11
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This redefines augment_event_data() to use interpolation rather than
-- nearest neighbour. It now takes an argument indicating the maximum
-- allowed interpolation timespan. An overload with a default of ten
-- minutes is also provided, as an in situ replacement for the previous
-- version.
--
-- The ten minute default is based on Triggerfish headers behaviour seen
-- on crew 248 during soft starts.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
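-- As an illustrative sketch (not part of the upgrade), the redefined
-- procedure could be called like this once applied, assuming a hypothetical
-- survey schema named survey_example:
--
--   SET search_path TO survey_example, public;
--   CALL augment_event_data();      -- default 600 second maximum timespan
--   CALL augment_event_data(120);   -- explicit 2 minute maximum timespan
--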
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE OR REPLACE PROCEDURE augment_event_data (maxspan numeric)
LANGUAGE sql
AS $$
-- Populate the timestamp of sequence / point events
UPDATE event_log_full
SET tstamp = tstamp_from_sequence_shot(sequence, point)
WHERE
tstamp IS NULL AND sequence IS NOT NULL AND point IS NOT NULL;
-- Populate the geometry of sequence / point events for which
-- there is raw_shots data.
UPDATE event_log_full
SET meta = meta ||
jsonb_build_object(
'geometry',
(
SELECT st_transform(geometry, 4326)::jsonb
FROM raw_shots rs
WHERE rs.sequence = event_log_full.sequence AND rs.point = event_log_full.point
)
)
WHERE
sequence IS NOT NULL AND point IS NOT NULL AND
NOT meta ? 'geometry';
-- Populate the geometry of time-based events
UPDATE event_log_full e
SET
meta = meta || jsonb_build_object('geometry',
(SELECT st_transform(g.geometry, 4326)::jsonb
FROM interpolate_geometry_from_tstamp(e.tstamp, maxspan) g))
WHERE
tstamp IS NOT NULL AND
sequence IS NULL AND point IS NULL AND
NOT meta ? 'geometry';
-- Get rid of null geometries
UPDATE event_log_full
SET
meta = meta - 'geometry'
WHERE
jsonb_typeof(meta->'geometry') = 'null';
-- Simplify the GeoJSON when the CRS is EPSG:4326
UPDATE event_log_full
SET
meta = meta #- '{geometry, crs}'
WHERE
meta->'geometry'->'crs'->'properties'->>'name' = 'EPSG:4326';
$$;
COMMENT ON PROCEDURE augment_event_data(numeric)
IS 'Populate missing timestamps and geometries in event_log_full';
CREATE OR REPLACE PROCEDURE augment_event_data ()
LANGUAGE sql
AS $$
CALL augment_event_data(600);
$$;
COMMENT ON PROCEDURE augment_event_data()
IS 'Overload of augment_event_data(maxspan numeric) with a maxspan value of 600 seconds.';
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
BEGIN
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.11"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.11"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--


@@ -1,193 +0,0 @@
-- Add midnight shots view and logging procedure.
--
-- New schema version: 0.3.12
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This defines a midnight_shots view and a log_midnight_shots() procedure
-- (with some overloads). The view returns all points straddling midnight
-- UTC and belonging to the same sequence (so last shot of the day and
-- first shot of the next day).
--
-- The procedure inserts the corresponding events (optionally constrained
-- by an earliest and a latest date) in the event log, unless the events
-- already exist.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
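-- As an illustrative sketch (not part of the upgrade), the new procedure
-- could be called like this once applied, assuming a hypothetical survey
-- schema named survey_example and made-up dates:
--
--   SET search_path TO survey_example, public;
--   CALL log_midnight_shots();                             -- all missing events
--   CALL log_midnight_shots('2020-10-01', '2020-10-31');   -- a date range only
--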
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE OR REPLACE VIEW midnight_shots AS
WITH straddlers AS (
-- Get sequence numbers straddling midnight UTC
SELECT sequence
FROM final_shots
GROUP BY sequence
HAVING min(date(tstamp)) != max(date(tstamp))
),
ts AS (
-- Get earliest and latest timestamps for each day
-- for each of the above sequences.
-- This will return the timestamps for:
-- FSP, LDSP, FDSP, LSP.
SELECT
fs.sequence,
min(fs.tstamp) AS ts0,
max(fs.tstamp) AS ts1
FROM final_shots fs INNER JOIN straddlers USING (sequence)
GROUP BY fs.sequence, (date(fs.tstamp))
ORDER BY fs.sequence, date(fs.tstamp)
),
spts AS (
-- Filter out FSP, LSP from the above.
-- NOTE: This *should* in theory be able to cope with
-- a sequence longer than 24 hours (so with more than
-- one LDSP, FDSP) but that hasn't been tested.
SELECT DISTINCT
sequence,
min(ts1) OVER (PARTITION BY sequence) ldsp,
max(ts0) OVER (PARTITION BY sequence) fdsp
FROM ts
ORDER BY sequence
), evt AS (
SELECT
fs.tstamp,
fs.sequence,
point,
'Last shotpoint of the day' remarks,
'{LDSP}'::text[] labels
FROM final_shots fs
INNER JOIN spts ON fs.sequence = spts.sequence AND fs.tstamp = spts.ldsp
UNION SELECT
fs.tstamp,
fs.sequence,
point,
'First shotpoint of the day' remarks,
'{FDSP}'::text[] labels
FROM final_shots fs
INNER JOIN spts ON fs.sequence = spts.sequence AND fs.tstamp = spts.fdsp
ORDER BY tstamp
)
SELECT * FROM evt;
CREATE OR REPLACE PROCEDURE log_midnight_shots (dt0 date, dt1 date)
LANGUAGE sql
AS $$
INSERT INTO event_log (sequence, point, remarks, labels, meta)
SELECT
sequence, point, remarks, labels,
'{"auto": true, "insertedBy": "log_midnight_shots"}'::jsonb
FROM midnight_shots ms
WHERE
(dt0 IS NULL OR ms.tstamp >= dt0) AND
(dt1 IS NULL OR ms.tstamp <= dt1) AND
NOT EXISTS (
SELECT 1
FROM event_log el
WHERE ms.sequence = el.sequence AND ms.point = el.point AND el.labels @> ms.labels
);
-- Delete any midnight shots that might have been inserted in the log
-- but are no longer relevant according to the final_shots data.
-- We operate on event_log, so the deletion is traceable.
DELETE
FROM event_log
WHERE id IN (
SELECT id
FROM event_log el
LEFT JOIN midnight_shots ms USING (sequence, point)
WHERE
'{LDSP,FDSP}'::text[] && el.labels -- &&: Do the arrays overlap?
AND ms.sequence IS NULL
);
$$;
COMMENT ON PROCEDURE log_midnight_shots (date, date)
IS 'Add midnight shots between two dates dt0 and dt1 to the event_log, unless the events already exist.';
CREATE OR REPLACE PROCEDURE log_midnight_shots (dt0 date)
LANGUAGE sql
AS $$
CALL log_midnight_shots(dt0, NULL);
$$;
COMMENT ON PROCEDURE log_midnight_shots (date)
IS 'Overload taking only a dt0 (adds events on that date or after).';
CREATE OR REPLACE PROCEDURE log_midnight_shots ()
LANGUAGE sql
AS $$
CALL log_midnight_shots(NULL, NULL);
$$;
COMMENT ON PROCEDURE log_midnight_shots ()
IS 'Overload taking no arguments (adds all missing events).';
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
BEGIN
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.12"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.12"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--


@@ -1,162 +0,0 @@
-- Fix wrong number of missing shots in summary views
--
-- New schema version: 0.3.13
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- Fixes a bug in the `final_lines_summary` and `raw_lines_summary` views
-- which results in the number of missing shots being miscounted on jobs
-- using three sources.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE OR REPLACE VIEW raw_lines_summary AS
WITH summary AS (
SELECT DISTINCT rs.sequence,
first_value(rs.point) OVER w AS fsp,
last_value(rs.point) OVER w AS lsp,
first_value(rs.tstamp) OVER w AS ts0,
last_value(rs.tstamp) OVER w AS ts1,
count(rs.point) OVER w AS num_points,
count(pp.point) OVER w AS num_preplots,
public.st_distance(first_value(rs.geometry) OVER w, last_value(rs.geometry) OVER w) AS length,
((public.st_azimuth(first_value(rs.geometry) OVER w, last_value(rs.geometry) OVER w) * (180)::double precision) / pi()) AS azimuth
FROM (raw_shots rs
LEFT JOIN preplot_points pp USING (line, point))
WINDOW w AS (PARTITION BY rs.sequence ORDER BY rs.tstamp ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
)
SELECT rl.sequence,
rl.line,
s.fsp,
s.lsp,
s.ts0,
s.ts1,
(s.ts1 - s.ts0) AS duration,
s.num_points,
s.num_preplots,
(SELECT count(*) AS count
FROM missing_sequence_raw_points
WHERE missing_sequence_raw_points.sequence = s.sequence) AS missing_shots,
s.length,
s.azimuth,
rl.remarks,
rl.ntbp,
rl.meta
FROM (summary s
JOIN raw_lines rl USING (sequence));
CREATE OR REPLACE VIEW final_lines_summary AS
WITH summary AS (
SELECT DISTINCT fs.sequence,
first_value(fs.point) OVER w AS fsp,
last_value(fs.point) OVER w AS lsp,
first_value(fs.tstamp) OVER w AS ts0,
last_value(fs.tstamp) OVER w AS ts1,
count(fs.point) OVER w AS num_points,
public.st_distance(first_value(fs.geometry) OVER w, last_value(fs.geometry) OVER w) AS length,
((public.st_azimuth(first_value(fs.geometry) OVER w, last_value(fs.geometry) OVER w) * (180)::double precision) / pi()) AS azimuth
FROM final_shots fs
WINDOW w AS (PARTITION BY fs.sequence ORDER BY fs.tstamp ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
)
SELECT fl.sequence,
fl.line,
s.fsp,
s.lsp,
s.ts0,
s.ts1,
(s.ts1 - s.ts0) AS duration,
s.num_points,
( SELECT count(*) AS count
FROM missing_sequence_final_points
WHERE missing_sequence_final_points.sequence = s.sequence) AS missing_shots,
s.length,
s.azimuth,
fl.remarks,
fl.meta
FROM (summary s
JOIN final_lines fl USING (sequence));
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN
SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';
IF current_db_version >= '0.3.13' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;
IF current_db_version != '0.3.12' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.3.13"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.3.13"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--


@@ -1,122 +0,0 @@
-- Adapt the schema to the new project configuration handling
--
-- New schema version: 0.4.0
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This adapts the schema to the change in how project configurations are
-- handled (https://gitlab.com/wgp/dougal/software/-/merge_requests/29)
-- by creating a project_configuration() function which returns the
-- current project's configuration data.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
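-- As an illustrative sketch (not part of the upgrade), the new function
-- could be queried like this once applied, assuming a hypothetical survey
-- schema named survey_example:
--
--   SET search_path TO survey_example, public;
--   SELECT project_configuration()->'planner';
--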
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE OR REPLACE FUNCTION project_configuration()
RETURNS jsonb
LANGUAGE plpgsql
AS $$
DECLARE
schema_name text;
configuration jsonb;
BEGIN
SELECT nspname
INTO schema_name
FROM pg_namespace
WHERE oid = (
SELECT pronamespace
FROM pg_proc
WHERE oid = 'project_configuration'::regproc::oid
);
SELECT meta
INTO configuration
FROM public.projects
WHERE schema = schema_name;
RETURN configuration;
END
$$;
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN
SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';
IF current_db_version >= '0.4.0' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;
IF current_db_version != '0.3.12' AND current_db_version != '0.3.13' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.4.0"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.4.0"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--


@@ -1,264 +0,0 @@
-- Make adjust_planner() use project_configuration()
--
-- New schema version: 0.4.1
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This modifies adjust_planner() to use project_configuration()
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
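-- As an illustrative sketch (not part of the upgrade), the modified
-- procedure is still invoked as before, assuming a hypothetical survey
-- schema named survey_example:
--
--   SET search_path TO survey_example, public;
--   CALL adjust_planner();
--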
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE OR REPLACE PROCEDURE adjust_planner()
LANGUAGE plpgsql
AS $$
DECLARE
_planner_config jsonb;
_planned_line planned_lines%ROWTYPE;
_lag interval;
_last_sequence sequences_summary%ROWTYPE;
_deltatime interval;
_shotinterval interval;
_tstamp timestamptz;
_incr integer;
BEGIN
SET CONSTRAINTS planned_lines_pkey DEFERRED;
SELECT project_configuration()->'planner'
INTO _planner_config;
SELECT *
INTO _last_sequence
FROM sequences_summary
ORDER BY sequence DESC
LIMIT 1;
SELECT *
INTO _planned_line
FROM planned_lines
WHERE sequence = _last_sequence.sequence AND line = _last_sequence.line;
SELECT
COALESCE(
((lead(ts0) OVER (ORDER BY sequence)) - ts1),
make_interval(mins => (_planner_config->>'defaultLineChangeDuration')::integer)
)
INTO _lag
FROM planned_lines
WHERE sequence = _last_sequence.sequence AND line = _last_sequence.line;
_incr = sign(_last_sequence.lsp - _last_sequence.fsp);
RAISE NOTICE '_planner_config: %', _planner_config;
RAISE NOTICE '_last_sequence: %', _last_sequence;
RAISE NOTICE '_planned_line: %', _planned_line;
RAISE NOTICE '_incr: %', _incr;
-- Does the latest sequence match a planned sequence?
IF _planned_line IS NULL THEN -- No it doesn't
RAISE NOTICE 'Latest sequence shot does not match a planned sequence';
SELECT * INTO _planned_line FROM planned_lines ORDER BY sequence ASC LIMIT 1;
RAISE NOTICE '_planned_line: %', _planned_line;
IF _planned_line.sequence <= _last_sequence.sequence THEN
RAISE NOTICE 'Renumbering the planned sequences starting from %', _planned_line.sequence + 1;
-- Renumber the planned sequences starting from last shot sequence number + 1
UPDATE planned_lines
SET sequence = sequence + _last_sequence.sequence - _planned_line.sequence + 1;
END IF;
-- The correction to make to the first planned line's ts0 will be based on either the last
-- sequence's EOL + default line change time or the current time, whichever is later.
_deltatime := GREATEST(COALESCE(_last_sequence.ts1_final, _last_sequence.ts1) + make_interval(mins => (_planner_config->>'defaultLineChangeDuration')::integer), current_timestamp) - _planned_line.ts0;
-- Is the first of the planned lines start time in the past? (±5 mins)
IF _planned_line.ts0 < (current_timestamp - make_interval(mins => 5)) THEN
RAISE NOTICE 'First planned line is in the past. Adjusting times by %', _deltatime;
-- Adjust the start / end time of the planned lines by assuming that we are at
-- `defaultLineChangeDuration` minutes away from SOL of the first planned line.
UPDATE planned_lines
SET
ts0 = ts0 + _deltatime,
ts1 = ts1 + _deltatime;
END IF;
ELSE -- Yes it does
RAISE NOTICE 'Latest sequence does match a planned sequence: %, %', _planned_line.sequence, _planned_line.line;
-- Is it online?
IF EXISTS(SELECT 1 FROM raw_lines_files WHERE sequence = _last_sequence.sequence AND hash = '*online*') THEN
-- Yes it is
RAISE NOTICE 'Sequence % is online', _last_sequence.sequence;
-- Let us get the SOL from the events log if we can
RAISE NOTICE 'Trying to set fsp, ts0 from events log FSP, FGSP';
WITH e AS (
SELECT * FROM event_log
WHERE
sequence = _last_sequence.sequence
AND ('FSP' = ANY(labels) OR 'FGSP' = ANY(labels))
ORDER BY tstamp LIMIT 1
)
UPDATE planned_lines
SET
fsp = COALESCE(e.point, fsp),
ts0 = COALESCE(e.tstamp, ts0)
FROM e
WHERE planned_lines.sequence = _last_sequence.sequence;
-- Shot interval
_shotinterval := (_last_sequence.ts1 - _last_sequence.ts0) / abs(_last_sequence.lsp - _last_sequence.fsp);
RAISE NOTICE 'Estimating EOL from current shot interval: %', _shotinterval;
SELECT (abs(lsp-fsp) * _shotinterval + ts0) - ts1
INTO _deltatime
FROM planned_lines
WHERE sequence = _last_sequence.sequence;
---- Set ts1 for the current sequence
--UPDATE planned_lines
--SET
--ts1 = (abs(lsp-fsp) * _shotinterval) + ts0
--WHERE sequence = _last_sequence.sequence;
RAISE NOTICE 'Adjustment is %', _deltatime;
IF abs(EXTRACT(EPOCH FROM _deltatime)) < 8 THEN
RAISE NOTICE 'Adjustment too small (< 8 s), so not applying it';
RETURN;
END IF;
-- Adjust ts1 for the current sequence
UPDATE planned_lines
SET ts1 = ts1 + _deltatime
WHERE sequence = _last_sequence.sequence;
-- Now shift all sequences after
UPDATE planned_lines
SET ts0 = ts0 + _deltatime, ts1 = ts1 + _deltatime
WHERE sequence > _last_sequence.sequence;
RAISE NOTICE 'Deleting planned sequences before %', _planned_line.sequence;
-- Remove all previous planner entries.
DELETE
FROM planned_lines
WHERE sequence < _last_sequence.sequence;
ELSE
-- No it isn't
RAISE NOTICE 'Sequence % is offline', _last_sequence.sequence;
-- We were supposed to finish at _planned_line.ts1 but we finished at:
_tstamp := GREATEST(COALESCE(_last_sequence.ts1_final, _last_sequence.ts1), current_timestamp);
-- WARNING Next line is for testing only
--_tstamp := COALESCE(_last_sequence.ts1_final, _last_sequence.ts1);
-- So we need to adjust timestamps by:
_deltatime := _tstamp - _planned_line.ts1;
RAISE NOTICE 'Planned end: %, actual end: % (%, %)', _planned_line.ts1, _tstamp, _planned_line.sequence, _last_sequence.sequence;
RAISE NOTICE 'Shifting times by % for sequences > %', _deltatime, _planned_line.sequence;
-- NOTE: This won't work if sequences are not, err… sequential.
-- NOTE: This has been known to happen in 2020.
UPDATE planned_lines
SET
ts0 = ts0 + _deltatime,
ts1 = ts1 + _deltatime
WHERE sequence > _planned_line.sequence;
RAISE NOTICE 'Deleting planned sequences up to %', _planned_line.sequence;
-- Remove all previous planner entries.
DELETE
FROM planned_lines
WHERE sequence <= _last_sequence.sequence;
END IF;
END IF;
END;
$$;
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN
SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';
IF current_db_version >= '0.4.1' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;
IF current_db_version != '0.4.0' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.4.1"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.4.1"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--


@@ -1,98 +0,0 @@
-- Make binning_parameters() use project_configuration()
--
-- New schema version: 0.4.2
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This modifies binning_parameters() to use project_configuration()
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
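-- As an illustrative sketch (not part of the upgrade), the modified
-- function is still queried as before, assuming a hypothetical survey
-- schema named survey_example:
--
--   SET search_path TO survey_example, public;
--   SELECT binning_parameters();
--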
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE OR REPLACE FUNCTION binning_parameters() RETURNS jsonb
LANGUAGE sql STABLE LEAKPROOF PARALLEL SAFE
AS $$
SELECT project_configuration()->'binning' binning;
$$;
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN
SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';
IF current_db_version >= '0.4.2' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;
IF current_db_version != '0.4.1' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.4.2"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.4.2"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--


@@ -1,164 +0,0 @@
-- Support notification payloads larger than Postgres' NOTIFY limit.
--
-- New schema version: 0.4.3
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects the public schema only.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This creates a new table where large notification payloads are stored
-- temporarily and from which they might be recalled by the notification
-- listeners. It also creates a purge_notifications() procedure used to
-- clean up old notifications from the notifications log and finally,
-- modifies notify() to support these changes. When a large payload is
-- encountered, the payload is stored in the notify_payloads table and
-- a trimmed down version containing a notification_id is sent to listeners
-- instead. Listeners can then query notify_payloads to retrieve the full
-- payloads. It is the application layer's responsibility to delete old
-- notifications.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
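-- As an illustrative sketch (not part of the upgrade), a listener that
-- receives a trimmed-down notification carrying a payload_id could recover
-- the full payload and purge stale ones like this; the id value 42 is
-- made up:
--
--   SELECT payload FROM public.notify_payloads WHERE id = 42;
--   CALL public.purge_notifications();      -- default: drop payloads older than 120 s
--   CALL public.purge_notifications(300);   -- or use an explicit age in seconds
--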
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_schema () AS $outer$
BEGIN
RAISE NOTICE 'Updating public schema';
-- This upgrade only modifies the public schema, so we simply make
-- sure it is the one in the search path.
EXECUTE format('SET search_path TO public');
CREATE TABLE IF NOT EXISTS public.notify_payloads (
id SERIAL,
tstamp timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP,
payload text NOT NULL DEFAULT '',
PRIMARY KEY (id)
);
CREATE INDEX IF NOT EXISTS notify_payload_tstamp ON notify_payloads (tstamp);
CREATE OR REPLACE FUNCTION public.notify() RETURNS trigger
LANGUAGE plpgsql
AS $$
DECLARE
channel text := TG_ARGV[0];
pid text;
payload text;
notification text;
payload_id integer;
BEGIN
SELECT projects.pid INTO pid FROM projects WHERE schema = TG_TABLE_SCHEMA;
payload := json_build_object(
'tstamp', CURRENT_TIMESTAMP,
'operation', TG_OP,
'schema', TG_TABLE_SCHEMA,
'table', TG_TABLE_NAME,
'old', row_to_json(OLD),
'new', row_to_json(NEW),
'pid', pid
)::text;
IF octet_length(payload) < 1000 THEN
PERFORM pg_notify(channel, payload);
ELSE
-- We need to find another solution
-- FIXME Consider storing the payload in a temporary memory table,
-- referenced by some form of autogenerated ID. Then send the ID
-- as the payload and then it's up to the user to fetch the original
-- payload if interested. This needs a mechanism to expire older payloads
-- in the interest of conserving memory.
INSERT INTO notify_payloads (payload) VALUES (payload) RETURNING id INTO payload_id;
notification := json_build_object(
'tstamp', CURRENT_TIMESTAMP,
'operation', TG_OP,
'schema', TG_TABLE_SCHEMA,
'table', TG_TABLE_NAME,
'pid', pid,
'payload_id', payload_id
)::text;
PERFORM pg_notify(channel, notification);
RAISE INFO 'Payload over limit';
END IF;
RETURN NULL;
END;
$$;
CREATE PROCEDURE public.purge_notifications (age_seconds numeric DEFAULT 120) AS $$
DELETE FROM notify_payloads WHERE EXTRACT(epoch FROM CURRENT_TIMESTAMP - tstamp) > age_seconds;
$$ LANGUAGE sql;
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN
SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';
IF current_db_version >= '0.4.3' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;
IF current_db_version != '0.4.2' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;
-- This upgrade modified the `public` schema only, not individual
-- project schemas.
CALL pg_temp.upgrade_schema();
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_schema ();
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.4.3"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.4.3"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--


@@ -1,104 +0,0 @@
-- Add event_log_changes function
--
-- New schema version: 0.4.4
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This adds a function event_log_changes which returns the subset of
-- events from event_log_full which have been modified on or after a
-- given timestamp.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
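-- As an illustrative sketch (not part of the upgrade), the new function
-- could be queried like this once applied, assuming a hypothetical survey
-- schema named survey_example and a made-up timestamp:
--
--   SET search_path TO survey_example, public;
--   SELECT * FROM event_log_changes('2020-10-10 00:00:00+00');
--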
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE OR REPLACE FUNCTION event_log_changes(ts0 timestamptz)
RETURNS SETOF event_log_full
LANGUAGE sql
AS $$
SELECT *
FROM event_log_full
WHERE lower(validity) > ts0 OR upper(validity) IS NOT NULL AND upper(validity) > ts0
ORDER BY lower(validity);
$$;
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN
SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';
IF current_db_version >= '0.4.4' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;
IF current_db_version != '0.4.3' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.4.4"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.4.4"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--


@@ -1,147 +0,0 @@
-- Turn project_summary into a materialised view
--
-- New schema version: 0.4.5
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- The project_summary view is quite a bottleneck. While it itself is
-- not the real culprit (rather the underlying views are), this is one
-- relatively cheap way of improving responsiveness from the client's
-- point of view.
-- We leave the details of how / when to refresh the view to the non-
-- database code.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
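-- As an illustrative sketch (not part of the upgrade), the application (or
-- a scheduled job) would then be expected to refresh the view periodically,
-- assuming a hypothetical survey schema named survey_example:
--
--   SET search_path TO survey_example, public;
--   REFRESH MATERIALIZED VIEW project_summary;
--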
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
DROP VIEW project_summary;
CREATE MATERIALIZED VIEW project_summary AS
WITH fls AS (
SELECT
avg((final_lines_summary.duration / ((final_lines_summary.num_points - 1))::double precision)) AS shooting_rate,
avg((final_lines_summary.length / date_part('epoch'::text, final_lines_summary.duration))) AS speed,
sum(final_lines_summary.duration) AS prod_duration,
sum(final_lines_summary.length) AS prod_distance
FROM final_lines_summary
), project AS (
SELECT
p.pid,
p.name,
p.schema
FROM public.projects p
WHERE (split_part(current_setting('search_path'::text), ','::text, 1) = p.schema)
)
SELECT
project.pid,
project.name,
project.schema,
( SELECT count(*) AS count
FROM preplot_lines
WHERE (preplot_lines.class = 'V'::bpchar)) AS lines,
ps.total,
ps.virgin,
ps.prime,
ps.other,
ps.ntba,
ps.remaining,
( SELECT to_json(fs.*) AS to_json
FROM final_shots fs
ORDER BY fs.tstamp
LIMIT 1) AS fsp,
( SELECT to_json(fs.*) AS to_json
FROM final_shots fs
ORDER BY fs.tstamp DESC
LIMIT 1) AS lsp,
( SELECT count(*) AS count
FROM raw_lines rl) AS seq_raw,
( SELECT count(*) AS count
FROM final_lines rl) AS seq_final,
fls.prod_duration,
fls.prod_distance,
fls.speed AS shooting_rate
FROM preplot_summary ps,
fls,
project;
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN
SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';
IF current_db_version >= '0.4.5' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;
IF current_db_version != '0.4.4' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.4.5"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.4.5"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--


@@ -1,164 +0,0 @@
-- Sailline ancillary data
--
-- New schema version: 0.5.0
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- Issue #264 calls for associating sail and acquisition lines as well
-- as indicating expected acquisition direction, and other data which
-- cannot be provided via standard import formats such as SPS or P1/90.
--
-- We support this via an additional table that holds most of the required
-- data. This data can simply be inferred from regular preplots, e.g., line
-- direction can be deduced from preplot point order, and sail / source
-- line offsets can be taken from P1/90 headers or from a configuration
-- parameter. Alternatively, and preferably, the data can be provided
-- explicitly, which is what issue #264 asks for.
--
-- In principle, this makes at least some of the attributes of `preplot_lines`
-- redundant (at least `incr` and `ntba`) but we will leave them there for
-- the time being as technical debt.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
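-- As an illustrative sketch (not part of the upgrade), explicit sailline to
-- source line associations could be provided like this once applied; the
-- schema name, line numbers and the 'S' source line class are all made up:
--
--   SET search_path TO survey_example, public;
--   INSERT INTO preplot_saillines (sailline, line, sailline_class, line_class, incr)
--   VALUES (1001, 2001, 'V', 'S', true), (1001, 2002, 'V', 'S', true);
--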
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE TABLE IF NOT EXISTS preplot_saillines
(
sailline integer NOT NULL,
line integer NOT NULL,
sailline_class character(1) NOT NULL,
line_class character(1) NOT NULL,
incr boolean NOT NULL DEFAULT true,
ntba boolean NOT NULL DEFAULT false,
remarks text NOT NULL DEFAULT '',
meta jsonb NOT NULL DEFAULT '{}'::jsonb,
hash text NULL, -- Theoretically the info in this table could all be inferred.
PRIMARY KEY (sailline, sailline_class, line, line_class, incr),
CONSTRAINT fk_sailline FOREIGN KEY (sailline, sailline_class)
REFERENCES preplot_lines (line, class)
ON UPDATE CASCADE
ON DELETE CASCADE,
CONSTRAINT fk_line FOREIGN KEY (line, line_class)
REFERENCES preplot_lines (line, class)
ON UPDATE CASCADE
ON DELETE CASCADE,
CONSTRAINT fk_hash FOREIGN KEY (hash)
REFERENCES files (hash) MATCH SIMPLE
ON UPDATE CASCADE
ON DELETE CASCADE,
CHECK (sailline_class = 'V' AND sailline_class != line_class)
);
COMMENT ON TABLE preplot_saillines
IS 'We explicitly associate each preplot sailline (aka vessel line) with zero or more source lines. This information can be inferred from preplot files, e.g., via a sailline offset value, or explicitly provided.';
-- Let us copy whatever information we can from existing tables or views
INSERT INTO preplot_saillines
(sailline, line, sailline_class, line_class, incr, ntba, remarks, meta)
SELECT DISTINCT
sailline, psp.line, 'V' sailline_class, psp.class line_class, pl.incr, pl.ntba, pl.remarks, pl.meta
FROM preplot_saillines_points psp
INNER JOIN preplot_lines pl ON psp.sailline = pl.line AND pl.class = 'V'
ORDER BY sailline
ON CONFLICT DO NOTHING;
-- We need to recreate the preplot_saillines_points view
CREATE OR REPLACE VIEW preplot_saillines_points AS
SELECT psl.sailline,
psl.ntba AS sailline_ntba,
psl.line,
pps.point,
pps.class,
pps.ntba,
pps.geometry,
pps.meta
FROM preplot_saillines psl
INNER JOIN preplot_points pps
ON psl.line = pps.line AND psl.line_class = pps.class;
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN
SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';
IF current_db_version >= '0.5.0' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;
IF current_db_version != '0.4.5' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.5.0"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.5.0"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,119 +0,0 @@
-- Fix sequences_detail sailline association
--
-- New schema version: 0.5.1
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- The sequences_detail view wrongly associates source lines and shot
-- points when it should be associating saillines and shot points instead.
--
-- This update fixes that issue (#307).
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE OR REPLACE VIEW sequences_detail
AS
SELECT rl.sequence,
rl.line AS sailline,
rs.line,
rs.point,
rs.tstamp,
rs.objref AS objrefraw,
fs.objref AS objreffinal,
st_transform(pp.geometry, 4326) AS geometrypreplot,
st_transform(rs.geometry, 4326) AS geometryraw,
st_transform(fs.geometry, 4326) AS geometryfinal,
ij_error(rs.line::double precision, rs.point::double precision, rs.geometry) AS errorraw,
ij_error(rs.line::double precision, rs.point::double precision, fs.geometry) AS errorfinal,
json_build_object('preplot', pp.meta, 'raw', rs.meta, 'final', fs.meta) AS meta
FROM raw_lines rl
INNER JOIN preplot_saillines psl ON rl.line = psl.sailline
INNER JOIN raw_shots rs ON rs.sequence = rl.sequence AND rs.line = psl.line
INNER JOIN preplot_points pp ON psl.line = pp.line AND psl.line_class = pp.class AND rs.point = pp.point
LEFT JOIN final_shots fs ON rl.sequence = fs.sequence AND rs.point = fs.point;
ALTER TABLE sequences_detail
OWNER TO postgres;
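  -- A minimal usage sketch (hypothetical sequence number), e.g. to compare
  -- raw and final positioning errors for one sequence:
  --
  --   SELECT sequence, point, errorraw, errorfinal
  --   FROM sequences_detail
  --   WHERE sequence = 1234
  --   ORDER BY point;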
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN
SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';
IF current_db_version >= '0.5.1' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;
IF current_db_version != '0.5.0' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.5.1"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.5.1"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,145 +0,0 @@
-- Fix preplot_lines_summary view
--
-- New schema version: 0.5.2
--
-- WARNING: This update is buggy and does not give the desired
-- results. Schema version 0.5.4 fixes this.
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- Following introduction of `preplot_saillines` (0.5.0), the incr and
-- ntba statuses are stored in a separate table, not in `preplot_lines`
-- (TODO: a future upgrade should remove those columns from `preplot_lines`)
--
-- Now any views referencing `incr` and `ntba` must be updated to point to
-- the new location of those attributes.
--
-- This update fixes #312.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE OR REPLACE VIEW preplot_lines_summary
AS
WITH summary AS (
SELECT DISTINCT pp.line, pp.class,
first_value(pp.point) OVER w AS p0,
last_value(pp.point) OVER w AS p1,
count(pp.point) OVER w AS num_points,
st_distance(first_value(pp.geometry) OVER w, last_value(pp.geometry) OVER w) AS length,
st_azimuth(first_value(pp.geometry) OVER w, last_value(pp.geometry) OVER w) * 180::double precision / pi() AS azimuth0,
st_azimuth(last_value(pp.geometry) OVER w, first_value(pp.geometry) OVER w) * 180::double precision / pi() AS azimuth1
FROM preplot_points pp
WHERE pp.class = 'V'::bpchar
WINDOW w AS (PARTITION BY pp.line ORDER BY pp.point ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
)
SELECT psl.line,
CASE
WHEN psl.incr THEN s.p0
ELSE s.p1
END AS fsp,
CASE
WHEN psl.incr THEN s.p1
ELSE s.p0
END AS lsp,
s.num_points,
s.length,
CASE
WHEN psl.incr THEN s.azimuth0
ELSE s.azimuth1
END AS azimuth,
psl.incr,
psl.remarks
FROM summary s
JOIN preplot_saillines psl ON psl.sailline_class = s.class AND s.line = psl.line
ORDER BY psl.line, incr;
ALTER TABLE preplot_lines_summary
OWNER TO postgres;
COMMENT ON VIEW preplot_lines_summary
IS 'Summarises ''V'' (vessel sailline) preplot lines.';
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN
SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';
IF current_db_version >= '0.5.2' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;
IF current_db_version != '0.5.1' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.5.2"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.5.2"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,132 +0,0 @@
-- Fix final_lines_summary view
--
-- New schema version: 0.5.3
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This fixes a long-standing bug where, if the sail and source lines are
-- the same, the number of missing shots is miscounted.
--
-- This update fixes #313.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE OR REPLACE VIEW final_lines_summary
AS
WITH summary AS (
SELECT DISTINCT fs.sequence,
first_value(fs.point) OVER w AS fsp,
last_value(fs.point) OVER w AS lsp,
first_value(fs.tstamp) OVER w AS ts0,
last_value(fs.tstamp) OVER w AS ts1,
count(fs.point) OVER w AS num_points,
count(pp.point) OVER w AS num_preplots,
st_distance(first_value(fs.geometry) OVER w, last_value(fs.geometry) OVER w) AS length,
st_azimuth(first_value(fs.geometry) OVER w, last_value(fs.geometry) OVER w) * 180::double precision / pi() AS azimuth
FROM final_shots fs
LEFT JOIN preplot_points pp USING (line, point)
WINDOW w AS (PARTITION BY fs.sequence ORDER BY fs.tstamp ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
)
SELECT fl.sequence,
fl.line,
s.fsp,
s.lsp,
s.ts0,
s.ts1,
s.ts1 - s.ts0 AS duration,
s.num_points,
(( SELECT count(*) AS count
FROM preplot_points
WHERE preplot_points.line = fl.line AND (preplot_points.point >= s.fsp AND preplot_points.point <= s.lsp OR preplot_points.point >= s.lsp AND preplot_points.point <= s.fsp))) - s.num_preplots AS missing_shots,
s.length,
s.azimuth,
fl.remarks,
fl.meta
FROM summary s
JOIN final_lines fl USING (sequence);
ALTER TABLE final_lines_summary
OWNER TO postgres;
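  -- Illustrative query only (hypothetical data): list sequences that have
  -- missing shots according to this view:
  --
  --   SELECT sequence, line, fsp, lsp, num_points, missing_shots
  --   FROM final_lines_summary
  --   WHERE missing_shots > 0
  --   ORDER BY sequence;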
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN
SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';
IF current_db_version >= '0.5.3' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;
IF current_db_version != '0.5.2' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.5.3"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.5.3"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,145 +0,0 @@
-- Fix preplot_lines_summary view
--
-- New schema version: 0.5.4
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects all schemas in the database.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- Fixes upgrade 35 (0.5.2). The original description of 0.5.2 is included
-- below for ease of reference:
--
-- Following introduction of `preplot_saillines` (0.5.0), the incr and
-- ntba statuses are stored in a separate table, not in `preplot_lines`
-- (TODO: a future upgrade should remove those columns from `preplot_lines`)
--
-- Now any views referencing `incr` and `ntba` must be updated to point to
-- the new location of those attributes.
--
-- This update fixes #312.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_survey_schema (schema_name text) AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', schema_name;
-- We need to set the search path because some of the trigger
-- functions reference other tables in survey schemas assuming
-- they are in the search path.
EXECUTE format('SET search_path TO %I,public', schema_name);
CREATE OR REPLACE VIEW preplot_lines_summary
AS
WITH summary AS (
SELECT DISTINCT pp.line,
pp.class,
first_value(pp.point) OVER w AS p0,
last_value(pp.point) OVER w AS p1,
count(pp.point) OVER w AS num_points,
st_distance(first_value(pp.geometry) OVER w, last_value(pp.geometry) OVER w) AS length,
st_azimuth(first_value(pp.geometry) OVER w, last_value(pp.geometry) OVER w) * 180::double precision / pi() AS azimuth0,
st_azimuth(last_value(pp.geometry) OVER w, first_value(pp.geometry) OVER w) * 180::double precision / pi() AS azimuth1
FROM preplot_points pp
WHERE pp.class = 'V'::bpchar
WINDOW w AS (PARTITION BY pp.line ORDER BY pp.point ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
)
SELECT DISTINCT psl.sailline AS line,
CASE
WHEN psl.incr THEN s.p0
ELSE s.p1
END AS fsp,
CASE
WHEN psl.incr THEN s.p1
ELSE s.p0
END AS lsp,
s.num_points,
s.length,
CASE
WHEN psl.incr THEN s.azimuth0
ELSE s.azimuth1
END AS azimuth,
psl.incr,
psl.remarks
FROM summary s
JOIN preplot_saillines psl ON psl.sailline_class = s.class AND s.line = psl.sailline
ORDER BY psl.sailline, psl.incr;
ALTER TABLE preplot_lines_summary
OWNER TO postgres;
COMMENT ON VIEW preplot_lines_summary
IS 'Summarises ''V'' (vessel sailline) preplot lines.';
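  -- Usage sketch (illustrative only): list saillines with their first/last
  -- preplot points and shooting azimuth:
  --
  --   SELECT line, fsp, lsp, num_points, round(azimuth::numeric, 1) AS azimuth
  --   FROM preplot_lines_summary
  --   ORDER BY line;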
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN
SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';
IF current_db_version >= '0.5.4' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;
IF current_db_version != '0.5.3' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;
FOR row IN
SELECT schema_name FROM information_schema.schemata
WHERE schema_name LIKE 'survey_%'
ORDER BY schema_name
LOOP
CALL pg_temp.upgrade_survey_schema(row.schema_name);
END LOOP;
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_survey_schema (schema_name text);
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.5.4"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.5.4"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,110 +0,0 @@
-- Add keystore table
--
-- New schema version: 0.6.0
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade only affects the `public` schema.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This update adds a `keystore` table, intended for storing arbitrary
-- key / value pairs which, unlike the `info` tables, are not meant to
-- be directly accessible via the API. Its main purpose as of this writing
-- is to store user definitions (see #176, #177, #180).
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_database () AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', 'public';
SET search_path TO public;
CREATE TABLE IF NOT EXISTS keystore (
type TEXT NOT NULL, -- A class of data to be stored
key TEXT NOT NULL, -- A key that is unique for the class and access type
last_modified TIMESTAMP -- To detect update conflicts
DEFAULT CURRENT_TIMESTAMP,
data jsonb,
PRIMARY KEY (type, key) -- Composite primary key
);
-- Create a function to update the last_modified timestamp
CREATE OR REPLACE FUNCTION update_last_modified()
RETURNS TRIGGER AS $$
BEGIN
NEW.last_modified = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Create a trigger that calls the function before each update
CREATE OR REPLACE TRIGGER update_keystore_last_modified
BEFORE UPDATE ON keystore
FOR EACH ROW
EXECUTE FUNCTION update_last_modified();
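  -- Usage sketch (hypothetical type and key): any UPDATE refreshes
  -- last_modified automatically via the trigger above:
  --
  --   INSERT INTO keystore (type, key, data)
  --   VALUES ('example', 'demo-key', '{"hello": "world"}'::jsonb)
  --   ON CONFLICT (type, key) DO UPDATE SET data = EXCLUDED.data;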
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN
SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';
IF current_db_version >= '0.6.0' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;
IF current_db_version != '0.5.4' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;
CALL pg_temp.upgrade_database();
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_database ();
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.6.0"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.6.0"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,108 +0,0 @@
-- Add default user
--
-- New schema version: 0.6.1
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade only affects the `public` schema.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This update adds a default user to the system (see #176, #177, #180).
-- The default user can only be used when connecting from localhost.
--
-- This user has full access to every project via the organisations
-- permissions wildcard: `{"*": {read: true, write: true, edit: true}}`
-- and can be used to bootstrap the system by creating other users
-- and assigning organisational permissions.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_database () AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', 'public';
SET search_path TO public;
INSERT INTO keystore (type, key, data)
VALUES ('user', '6f1e7159-4ca0-4ae4-ab4e-89078166cc10', '
{
"id": "6f1e7159-4ca0-4ae4-ab4e-89078166cc10",
"ip": "127.0.0.0/24",
"name": "☠️",
"colour": "red",
"active": true,
"organisations": {
"*": {
"read": true,
"write": true,
"edit": true
}
}
}
'::jsonb)
ON CONFLICT (type, key) DO NOTHING;
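  -- To verify the bootstrap user after applying this patch (read-only check):
  --
  --   SELECT data->>'name', data->'organisations'
  --   FROM keystore
  --   WHERE type = 'user' AND key = '6f1e7159-4ca0-4ae4-ab4e-89078166cc10';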
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN
SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';
IF current_db_version >= '0.6.1' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;
IF current_db_version != '0.6.0' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;
CALL pg_temp.upgrade_database();
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_database ();
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.6.1"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.6.1"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,106 +0,0 @@
-- Add organisations section to project configurations
--
-- New schema version: 0.6.2
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade only affects the `public` schema.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This update adds an "organisations" section to the configuration,
-- with a default configured organisation of "WGP" with full access.
-- This is so that projects can be made accessible after migrating
-- to the new permissions architecture.
--
-- In addition, projects with an id starting with "eq" are assumed to
-- be Equinor projects, and an additional organisation is added with
-- read-only access. This is intended for clients, which should be
-- assigned to the "Equinor organisation".
--
-- Finally, we assign the vessel to the "WGP" organisation (full access)
-- so that we can actually use administrative endpoints.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_database () AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', 'public';
SET search_path TO public;
-- Add "organisations" section to configurations, if not already present
UPDATE projects
SET
meta = jsonb_set(meta, '{organisations}', '{"WGP": {"read": true, "write": true, "edit": true}}'::jsonb, true)
WHERE meta->'organisations' IS NULL;
-- Add (or overwrite!) "organisations.Equinor" giving read-only access (can be changed later via API)
UPDATE projects
SET
meta = jsonb_set(meta, '{organisations, Equinor}', '{"read": true, "write": false, "edit": false}'::jsonb, true)
WHERE pid LIKE 'eq%';
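  -- The same pattern can be used later to grant another organisation access
  -- to a specific project, e.g. (hypothetical names):
  --
  --   UPDATE projects
  --   SET meta = jsonb_set(meta, '{organisations, SomeClient}',
  --                        '{"read": true, "write": false, "edit": false}'::jsonb, true)
  --   WHERE pid = 'some_project';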
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN
SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';
IF current_db_version >= '0.6.2' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;
IF current_db_version != '0.6.1' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;
CALL pg_temp.upgrade_database();
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_database ();
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.6.2"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.6.2"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,109 +0,0 @@
-- Add comparisons schema and table
--
-- New schema version: 0.6.3
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade creates a new schema called `comparisons`.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This update adds a `comparisons` table to a `comparisons` schema.
-- The `comparisons.comparisons` table holds 4D prospect comparison data.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_database () AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', 'public';
SET search_path TO public;
-- BEGIN
CREATE SCHEMA IF NOT EXISTS comparisons
AUTHORIZATION postgres;
COMMENT ON SCHEMA comparisons
IS 'Holds 4D comparison data and logic';
CREATE TABLE IF NOT EXISTS comparisons.comparisons
(
type text COLLATE pg_catalog."default" NOT NULL,
baseline_pid text COLLATE pg_catalog."default" NOT NULL,
monitor_pid text COLLATE pg_catalog."default" NOT NULL,
data bytea,
meta jsonb NOT NULL DEFAULT '{}'::jsonb,
CONSTRAINT comparisons_pkey PRIMARY KEY (baseline_pid, monitor_pid, type)
)
TABLESPACE pg_default;
ALTER TABLE IF EXISTS comparisons.comparisons
OWNER to postgres;
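  -- Illustrative only (hypothetical project IDs): a 4D comparison row is
  -- keyed by baseline project, monitor project and comparison type:
  --
  --   INSERT INTO comparisons.comparisons (type, baseline_pid, monitor_pid, meta)
  --   VALUES ('timeshift', 'eq_base_2019', 'eq_mon_2023', '{"status": "pending"}'::jsonb)
  --   ON CONFLICT (baseline_pid, monitor_pid, type) DO NOTHING;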
-- END
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN
SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';
IF current_db_version >= '0.6.3' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;
IF current_db_version != '0.6.2' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;
CALL pg_temp.upgrade_database();
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_database ();
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.6.3"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.6.3"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,169 +0,0 @@
-- Allow notify() to exclude columns from notifications
--
-- New schema version: 0.6.4
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects the public schema only.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This update modifies notify() to accept, as optional arguments, the
-- names of columns that are to be *excluded* from the notification.
-- It is intended for tables with large columns which are however of
-- no particular interest in a notification.
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_database () AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', 'public';
SET search_path TO public;
-- BEGIN
CREATE OR REPLACE FUNCTION public.notify()
RETURNS trigger
LANGUAGE 'plpgsql'
COST 100
VOLATILE NOT LEAKPROOF
AS $BODY$
DECLARE
channel text := TG_ARGV[0];
pid text;
payload text;
notification text;
payload_id integer;
old_json jsonb;
new_json jsonb;
excluded_col text;
i integer;
BEGIN
-- Fetch pid
SELECT projects.pid INTO pid FROM projects WHERE schema = TG_TABLE_SCHEMA;
-- Build old and new as jsonb, excluding specified columns if provided
IF OLD IS NOT NULL THEN
old_json := row_to_json(OLD)::jsonb;
FOR i IN 1 .. TG_NARGS - 1 LOOP
excluded_col := TG_ARGV[i];
old_json := old_json - excluded_col;
END LOOP;
ELSE
old_json := NULL;
END IF;
IF NEW IS NOT NULL THEN
new_json := row_to_json(NEW)::jsonb;
FOR i IN 1 .. TG_NARGS - 1 LOOP
excluded_col := TG_ARGV[i];
new_json := new_json - excluded_col;
END LOOP;
ELSE
new_json := NULL;
END IF;
-- Build payload
payload := json_build_object(
'tstamp', CURRENT_TIMESTAMP,
'operation', TG_OP,
'schema', TG_TABLE_SCHEMA,
'table', TG_TABLE_NAME,
'old', old_json,
'new', new_json,
'pid', pid
)::text;
-- Handle large payloads
IF octet_length(payload) < 1000 THEN
PERFORM pg_notify(channel, payload);
ELSE
-- Store large payload and notify with ID (as before)
INSERT INTO notify_payloads (payload) VALUES (payload) RETURNING id INTO payload_id;
notification := json_build_object(
'tstamp', CURRENT_TIMESTAMP,
'operation', TG_OP,
'schema', TG_TABLE_SCHEMA,
'table', TG_TABLE_NAME,
'pid', pid,
'payload_id', payload_id
)::text;
PERFORM pg_notify(channel, notification);
RAISE INFO 'Payload over limit';
END IF;
RETURN NULL;
END;
$BODY$;
ALTER FUNCTION public.notify()
OWNER TO postgres;
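  -- A trigger can now pass the columns to omit after the channel name,
  -- e.g. (sketch only; `mytable` and its large `blob` column are hypothetical):
  --
  --   CREATE TRIGGER mytable_tg
  --     AFTER INSERT OR DELETE OR UPDATE ON mytable
  --     FOR EACH ROW EXECUTE FUNCTION public.notify('mychannel', 'blob');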
-- END
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN
SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';
IF current_db_version >= '0.6.4' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;
IF current_db_version != '0.6.3' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;
CALL pg_temp.upgrade_database();
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_database ();
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.6.4"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.6.4"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,96 +0,0 @@
-- Add notify trigger to the comparisons table
--
-- New schema version: 0.6.5
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects the public schema only.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This update adds a trigger on `comparisons.comparisons` that calls
-- notify() on insert, update or delete, excluding the large `data`
-- column from the notification payload (see 0.6.4, which added support
-- for excluded columns).
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_database () AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', 'public';
SET search_path TO public;
-- BEGIN
CREATE OR REPLACE TRIGGER comparisons_tg
AFTER INSERT OR DELETE OR UPDATE
ON comparisons.comparisons
FOR EACH ROW
EXECUTE FUNCTION public.notify('comparisons', 'data');
-- END
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN
SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';
IF current_db_version >= '0.6.5' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;
IF current_db_version != '0.6.4' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;
CALL pg_temp.upgrade_database();
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_database ();
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.6.5"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.6.5"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

View File

@@ -1,157 +0,0 @@
-- Add last_project_update() function
--
-- New schema version: 0.6.6
--
-- ATTENTION:
--
-- ENSURE YOU HAVE BACKED UP THE DATABASE BEFORE RUNNING THIS SCRIPT.
--
--
-- NOTE: This upgrade affects the public schema only.
-- NOTE: Each application starts a transaction, which must be committed
-- or rolled back.
--
-- This adds a last_project_update(pid) function. It takes a project ID
-- and returns the last known timestamp from that project. Timestamps
-- are derived from multiple sources:
--
-- - raw_shots table
-- - final_shots table
-- - event_log_full table
-- - info table where key = 'qc'
-- - files table, from the hashes (which contain the file's mtime)
-- - project configuration, looking for an _updatedOn property
--
-- To apply, run as the dougal user:
--
-- psql <<EOF
-- \i $THIS_FILE
-- COMMIT;
-- EOF
--
-- NOTE: It can be applied multiple times without ill effect.
--
BEGIN;
CREATE OR REPLACE PROCEDURE pg_temp.show_notice (notice text) AS $$
BEGIN
RAISE NOTICE '%', notice;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade_database () AS $outer$
BEGIN
RAISE NOTICE 'Updating schema %', 'public';
SET search_path TO public;
-- BEGIN
CREATE OR REPLACE FUNCTION public.last_project_update(p_pid text)
RETURNS timestamp with time zone
LANGUAGE plpgsql
AS $function$
DECLARE
v_last_ts timestamptz := NULL;
v_current_ts timestamptz;
v_current_str text;
v_current_unix numeric;
v_sid_rec record;
BEGIN
-- From raw_shots, final_shots, info, and files
FOR v_sid_rec IN SELECT schema FROM public.projects WHERE pid = p_pid
LOOP
-- From raw_shots
EXECUTE 'SELECT max(tstamp) FROM ' || v_sid_rec.schema || '.raw_shots' INTO v_current_ts;
IF v_current_ts > v_last_ts OR v_last_ts IS NULL THEN
v_last_ts := v_current_ts;
END IF;
-- From final_shots
EXECUTE 'SELECT max(tstamp) FROM ' || v_sid_rec.schema || '.final_shots' INTO v_current_ts;
IF v_current_ts > v_last_ts OR v_last_ts IS NULL THEN
v_last_ts := v_current_ts;
END IF;
-- From info where key = 'qc'
EXECUTE 'SELECT value->>''updatedOn'' FROM ' || v_sid_rec.schema || '.info WHERE key = ''qc''' INTO v_current_str;
IF v_current_str IS NOT NULL THEN
v_current_ts := v_current_str::timestamptz;
IF v_current_ts > v_last_ts OR v_last_ts IS NULL THEN
v_last_ts := v_current_ts;
END IF;
END IF;
-- From files hash second part, only for valid colon-separated hashes
EXECUTE 'SELECT max( split_part(hash, '':'', 2)::numeric ) FROM ' || v_sid_rec.schema || '.files WHERE hash ~ ''^[0-9]+:[0-9]+\\.[0-9]+:[0-9]+\\.[0-9]+:[0-9a-f]+$''' INTO v_current_unix;
IF v_current_unix IS NOT NULL THEN
v_current_ts := to_timestamp(v_current_unix);
IF v_current_ts > v_last_ts OR v_last_ts IS NULL THEN
v_last_ts := v_current_ts;
END IF;
END IF;
-- From event_log_full
EXECUTE 'SELECT max(tstamp) FROM ' || v_sid_rec.schema || '.event_log_full' INTO v_current_ts;
IF v_current_ts > v_last_ts OR v_last_ts IS NULL THEN
v_last_ts := v_current_ts;
END IF;
END LOOP;
-- From projects.meta->_updatedOn
SELECT (meta->>'_updatedOn')::timestamptz FROM public.projects WHERE pid = p_pid INTO v_current_ts;
IF v_current_ts > v_last_ts OR v_last_ts IS NULL THEN
v_last_ts := v_current_ts;
END IF;
RETURN v_last_ts;
END;
$function$;
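  -- Usage sketch (illustrative): last known activity timestamp per project:
  --
  --   SELECT pid, public.last_project_update(pid) AS last_update
  --   FROM public.projects
  --   ORDER BY last_update DESC NULLS LAST;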
-- END
END;
$outer$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pg_temp.upgrade () AS $outer$
DECLARE
row RECORD;
current_db_version TEXT;
BEGIN
SELECT value->>'db_schema' INTO current_db_version FROM public.info WHERE key = 'version';
IF current_db_version >= '0.6.6' THEN
RAISE EXCEPTION
USING MESSAGE='Patch already applied';
END IF;
IF current_db_version != '0.6.5' THEN
RAISE EXCEPTION
USING MESSAGE='Invalid database version: ' || current_db_version,
HINT='Ensure all previous patches have been applied.';
END IF;
CALL pg_temp.upgrade_database();
END;
$outer$ LANGUAGE plpgsql;
CALL pg_temp.upgrade();
CALL pg_temp.show_notice('Cleaning up');
DROP PROCEDURE pg_temp.upgrade_database ();
DROP PROCEDURE pg_temp.upgrade ();
CALL pg_temp.show_notice('Updating db_schema version');
INSERT INTO public.info VALUES ('version', '{"db_schema": "0.6.6"}')
ON CONFLICT (key) DO UPDATE
SET value = public.info.value || '{"db_schema": "0.6.6"}' WHERE public.info.key = 'version';
CALL pg_temp.show_notice('All done. You may now run "COMMIT;" to persist the changes');
DROP PROCEDURE pg_temp.show_notice (notice text);
--
--NOTE Run `COMMIT;` now if all went well
--

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -7,51 +7,34 @@
id: missing_shots
check: |
const sequence = currentItem;
let results;
if (sequence.missing_shots) {
results = {
shots: {}
}
const missing_shots = missingShotpoints.filter(i => !i.ntba);
for (const shot of missing_shots) {
results.shots[shot.point] = { remarks: "Missed shot", labels: [ "QC", "QCAcq" ] };
}
} else {
results = true;
}
const sp0 = Math.min(sequence.fsp, sequence.lsp);
const sp1 = Math.max(sequence.fsp, sequence.lsp);
const missing = preplots.filter(r => r.line == sequence.line &&
r.point >= sp0 && r.point <= sp1 &&
!sequence.shots.find(s => s.point == r.point)
);
results;
missing.length == 0 || missing.map(r => `Missing shot: ${r.point}`).join("\n")
-
name: "Gun QC"
disabled: false
labels: [ "QC", "QCGuns" ]
children:
-
name: "Sequences without gun data"
iterate: "sequences"
id: seq_no_gun_data
check: |
shotpoints.some(i => i.meta?.raw?.smsrc) || "Sequence has no gun data"
-
name: "Missing gun data"
id: missing_gun_data
ignoreAllFailed: true
check: |
!!currentItem._("raw_meta.smsrc.guns")
? true
: "Missing gun data"
!!currentItem._("raw_meta.smsrc.guns") || "Missing gun data"
-
name: "No fire"
id: no_fire
check: |
// const currentShot = currentItem;
// const gunData = currentItem._("raw_meta.smsrc");
// (gunData && gunData.guns && gunData.guns.length != gunData.num_active)
// ? `Source ${gunData.src_number}: No fire (${gunData.guns.length - gunData.num_active} guns)`
// : true;
// Disabled due to changes in Smartsource software. It now returns all guns on every shot, not just active ones.
true
const currentShot = currentItem;
const gunData = currentItem._("raw_meta.smsrc");
(gunData && gunData.num_nofire != 0)
? `Source ${gunData.src_number}: No fire (${gunData.num_nofire} guns)`
: true;
-
name: "Pressure errors"
@@ -64,8 +47,8 @@
.guns
.filter(gun => ((gun[2] == gunData.src_number) && (gun[pressure]/parameters.gunPressureNominal - 1) > parameters.gunPressureToleranceRatio))
.map(gun =>
`source ${gun[2]}, string ${gun[0]}, gun ${gun[1]}, pressure: ${gun[pressure]} / ${parameters.gunPressureNominal} = ${(Math.abs(gun[pressure]/parameters.gunPressureNominal - 1)*100).toFixed(2)}% > ${(parameters.gunPressureToleranceRatio*100).toFixed(2)}%`
).join(" \n");
`source ${gun[2]}, string ${gun[0]}, gun ${gun[1]}, pressure: ${gun[pressure]} / ${parameters.gunPressureNominal} = ${(Math.abs(gunData.manifold/parameters.gunPressureNominal - 1)*100).toFixed(1)}% > ${(parameters.gunPressureToleranceRatio*100).toFixed(1)}%`
);
results && results.length
? results
: true
@@ -167,7 +150,7 @@
.filter(gun => Math.abs(gun[firetime]-gun[aimpoint]) >= parameters.gunTimingWarning && Math.abs(gun[firetime]-gun[aimpoint]) <= parameters.gunTiming)
.forEach(gun => {
const value = Math.abs(gun[firetime]-gun[aimpoint]);
result.push(`Delta warning: source ${gun[2]}, string ${gun[0]}, gun ${gun[1]}: ${parameters.gunTimingWarning} ≤ ${value.toFixed(2)} ≤ ${parameters.gunTiming}`);
result.push(`Delta error: source ${gun[2]}, string ${gun[0]}, gun ${gun[1]}: ${parameters.gunTimingWarning} ≤ ${value.toFixed(2)} ≤ ${parameters.gunTiming}`);
});
}
if (result.length) {
@@ -209,7 +192,7 @@
check: |
const currentShot = currentItem;
Math.abs(currentShot.error_i) <= parameters.crosslineError
|| `Crossline error (${currentShot.type}): ${currentShot.error_i.toFixed(2)} > ${parameters.crosslineError}`
|| `Crossline error: ${currentShot.error_i.toFixed(1)} > ${parameters.crosslineError}`
-
name: "Inline"
@@ -217,7 +200,7 @@
check: |
const currentShot = currentItem;
Math.abs(currentShot.error_j) <= parameters.inlineError
|| `Inline error (${currentShot.type}): ${currentShot.error_j.toFixed(2)} > ${parameters.inlineError}`
|| `Inline error: ${currentShot.error_j.toFixed(1)} > ${parameters.inlineError}`
-
name: "Centre of source preplot deviation (moving average)"
@@ -230,16 +213,11 @@
id: crossline_average
check: |
const currentSequence = currentItem;
//const i_err = shotpoints.filter(s => s.error_i != null).map(a => a.error_i);
const i_err = shotpoints.map(i =>
(i.errorfinal?.coordinates ?? i.errorraw?.coordinates)[0]
)
.filter(i => !isNaN(i));
const i_err = currentSequence.shots.filter(s => s.error_i != null).map(a => a.error_i);
if (i_err.length) {
const avg = i_err.reduce( (a, b) => a+b)/i_err.length;
avg <= parameters.crosslineErrorAverage ||
`Average crossline error: ${avg.toFixed(2)} > ${parameters.crosslineErrorAverage}`
`Average crossline error: ${avg.toFixed(1)} > ${parameters.crosslineErrorAverage}`
} else {
`Sequence ${currentSequence.sequence} has no shots within preplot`
}
@@ -252,27 +230,16 @@
check: |
const currentSequence = currentItem;
const n = parameters.inlineErrorRunningAverageShots; // For brevity
const results = shotpoints.slice(n/2, -n/2).map( (shot, index) => {
const shots = shotpoints.slice(index, index+n).map(i =>
(i.errorfinal?.coordinates ?? i.errorraw?.coordinates)[1]
).filter(i => i !== null);
const results = currentSequence.shots.slice(n/2, -n/2).map( (shot, index) => {
const shots = currentSequence.shots.slice(index, index+n).map(i => i.error_j).filter(i => i !== null);
if (!shots.length) {
// We are outside the preplot
// Nothing to see here, move along
return true;
}
const mean = shots.reduce( (a, b) => a+b ) / shots.length;
return Math.abs(mean) <= parameters.inlineErrorRunningAverageValue || [
shot.point,
{
remarks: `Running average inline error: ${mean.toFixed(2)} > ${parameters.inlineErrorRunningAverageValue}`,
labels: [ "QC", "QCNav" ]
}
]
return Math.abs(mean) <= parameters.inlineErrorRunningAverageValue ||
`Running average inline error: shot ${shot.point}, ${mean.toFixed(1)} > ${parameters.inlineErrorRunningAverageValue}`
}).filter(i => i !== true);
results.length == 0 || results.join("\n");
results.length == 0 || {
remarks: "Sequence exceeds inline error running average limit",
shots: Object.fromEntries(results)
}

View File

@@ -1,3 +0,0 @@
# TLS certificates directory
Drop TLS certificates required by Dougal in this directory. It is excluded by [`.gitignore`](../../.gitignore) so its contents should never be committed by accident (and shouldn't be committed on purpose!).

View File

@@ -1,9 +1,6 @@
{
"jwt": {
"secret": "",
"options": {
"expiresIn": 1800
}
"secret": ""
},
"db": {
"user": "postgres",

View File

@@ -1,968 +0,0 @@
const codeToType = {
0: Int8Array,
1: Uint8Array,
2: Int16Array,
3: Uint16Array,
4: Int32Array,
5: Uint32Array,
7: Float32Array,
8: Float64Array,
9: BigInt64Array,
10: BigUint64Array
};
const typeToBytes = {
Int8Array: 1,
Uint8Array: 1,
Int16Array: 2,
Uint16Array: 2,
Int32Array: 4,
Uint32Array: 4,
Float32Array: 4,
Float64Array: 8,
BigInt64Array: 8,
BigUint64Array: 8
};
function readTypedValue(view, offset, type) {
switch (type) {
case Int8Array: return view.getInt8(offset);
case Uint8Array: return view.getUint8(offset);
case Int16Array: return view.getInt16(offset, true);
case Uint16Array: return view.getUint16(offset, true);
case Int32Array: return view.getInt32(offset, true);
case Uint32Array: return view.getUint32(offset, true);
case Float32Array: return view.getFloat32(offset, true);
case Float64Array: return view.getFloat64(offset, true);
case BigInt64Array: return view.getBigInt64(offset, true);
case BigUint64Array: return view.getBigUint64(offset, true);
default: throw new Error(`Unsupported type: ${type.name}`);
}
}
function writeTypedValue(view, offset, value, type) {
switch (type) {
case Int8Array: view.setInt8(offset, value); break;
case Uint8Array: view.setUint8(offset, value); break;
case Int16Array: view.setInt16(offset, value, true); break;
case Uint16Array: view.setUint16(offset, value, true); break;
case Int32Array: view.setInt32(offset, value, true); break;
case Uint32Array: view.setUint32(offset, value, true); break;
case Float32Array: view.setFloat32(offset, value, true); break;
case Float64Array: view.setFloat64(offset, value, true); break;
case BigInt64Array: view.setBigInt64(offset, BigInt(value), true); break;
case BigUint64Array: view.setBigUint64(offset, BigInt(value), true); break;
default: throw new Error(`Unsupported type: ${type.name}`);
}
}
class DougalBinaryBundle extends ArrayBuffer {
static HEADER_LENGTH = 4; // Length of a bundle header
/** Clone an existing ByteArray into a DougalBinaryBundle
*/
static clone (buffer) {
const clone = new DougalBinaryBundle(buffer.byteLength);
const uint8Array = new Uint8Array(buffer);
const uint8ArrayClone = new Uint8Array(clone);
uint8ArrayClone.set(uint8Array);
return clone;
}
constructor (length, options) {
super (length, options);
}
/** Get the count of bundles in this ByteArray.
*
* Stops at the first non-bundle looking offset
*/
get bundleCount () {
let count = 0;
let currentBundleOffset = 0;
const view = new DataView(this);
while (currentBundleOffset < this.byteLength) {
const currentBundleHeader = view.getUint32(currentBundleOffset, true);
if ((currentBundleHeader & 0xff) !== 0x1c) {
// This is not a bundle
return count;
}
let currentBundleLength = currentBundleHeader >>> 8;
currentBundleOffset += currentBundleLength + DougalBinaryBundle.HEADER_LENGTH;
count++;
}
return count;
}
/** Get the number of chunks in the bundles of this ByteArray
*/
get chunkCount () {
let count = 0;
let bundleOffset = 0;
const view = new DataView(this);
while (bundleOffset < this.byteLength) {
const header = view.getUint32(bundleOffset, true);
if ((header & 0xFF) !== 0x1C) break;
const length = header >>> 8;
if (bundleOffset + 4 + length > this.byteLength) break;
let chunkOffset = bundleOffset + 4; // relative to buffer start
while (chunkOffset < bundleOffset + 4 + length) {
const chunkType = view.getUint8(chunkOffset);
if (chunkType !== 0x11 && chunkType !== 0x12) break;
const cCount = view.getUint16(chunkOffset + 2, true);
const ΔelemC = view.getUint8(chunkOffset + 10);
const elemC = view.getUint8(chunkOffset + 11);
let localOffset = 12; // header size
localOffset += ΔelemC + elemC; // preface
// initial values
for (let k = 0; k < ΔelemC; k++) {
const typeByte = view.getUint8(chunkOffset + 12 + k);
const baseCode = typeByte & 0xF;
const baseType = codeToType[baseCode];
if (!baseType) throw new Error('Invalid base type code');
localOffset += typeToBytes[baseType.name];
}
// pad after initial
while (localOffset % 4 !== 0) localOffset++;
if (chunkType === 0x11) { // Sequential
// record data: Δelems incrs
for (let k = 0; k < ΔelemC; k++) {
const typeByte = view.getUint8(chunkOffset + 12 + k);
const incrCode = typeByte >> 4;
const incrType = codeToType[incrCode];
if (!incrType) throw new Error('Invalid incr type code');
localOffset += cCount * typeToBytes[incrType.name];
}
// elems
for (let k = 0; k < elemC; k++) {
const typeCode = view.getUint8(chunkOffset + 12 + ΔelemC + k);
const type = codeToType[typeCode];
if (!type) throw new Error('Invalid elem type code');
localOffset += cCount * typeToBytes[type.name];
}
} else { // Interleaved
// Compute exact stride for interleaved record data
let ΔelemStride = 0;
for (let k = 0; k < ΔelemC; k++) {
const typeByte = view.getUint8(chunkOffset + 12 + k);
const incrCode = typeByte >> 4;
const incrType = codeToType[incrCode];
if (!incrType) throw new Error('Invalid incr type code');
ΔelemStride += typeToBytes[incrType.name];
}
let elemStride = 0;
for (let k = 0; k < elemC; k++) {
const typeCode = view.getUint8(chunkOffset + 12 + ΔelemC + k);
const type = codeToType[typeCode];
if (!type) throw new Error('Invalid elem type code');
elemStride += typeToBytes[type.name];
}
const recordStride = ΔelemStride + elemStride;
localOffset += cCount * recordStride;
}
// pad after record
while (localOffset % 4 !== 0) localOffset++;
chunkOffset += localOffset;
count++;
}
bundleOffset += 4 + length;
}
return count;
}
/** Return an array of DougalBinaryChunkSequential or DougalBinaryChunkInterleaved instances
*/
chunks () {
const chunks = [];
let bundleOffset = 0;
const view = new DataView(this);
while (bundleOffset < this.byteLength) {
const header = view.getUint32(bundleOffset, true);
if ((header & 0xFF) !== 0x1C) break;
const length = header >>> 8;
if (bundleOffset + 4 + length > this.byteLength) break;
let chunkOffset = bundleOffset + 4;
while (chunkOffset < bundleOffset + 4 + length) {
const chunkType = view.getUint8(chunkOffset);
if (chunkType !== 0x11 && chunkType !== 0x12) break;
const cCount = view.getUint16(chunkOffset + 2, true);
const ΔelemC = view.getUint8(chunkOffset + 10);
const elemC = view.getUint8(chunkOffset + 11);
let localOffset = 12;
localOffset += ΔelemC + elemC;
// initial values
for (let k = 0; k < ΔelemC; k++) {
const typeByte = view.getUint8(chunkOffset + 12 + k);
const baseCode = typeByte & 0xF;
const baseType = codeToType[baseCode];
if (!baseType) throw new Error('Invalid base type code');
localOffset += typeToBytes[baseType.name];
}
// pad after initial
while (localOffset % 4 !== 0) localOffset++;
if (chunkType === 0x11) { // Sequential
// record data: Δelems incrs
for (let k = 0; k < ΔelemC; k++) {
const typeByte = view.getUint8(chunkOffset + 12 + k);
const incrCode = typeByte >> 4;
const incrType = codeToType[incrCode];
if (!incrType) throw new Error('Invalid incr type code');
localOffset += cCount * typeToBytes[incrType.name];
}
// elems
for (let k = 0; k < elemC; k++) {
const typeCode = view.getUint8(chunkOffset + 12 + ΔelemC + k);
const type = codeToType[typeCode];
if (!type) throw new Error('Invalid elem type code');
localOffset += cCount * typeToBytes[type.name];
}
} else { // Interleaved
// Compute exact stride for interleaved record data
let ΔelemStride = 0;
for (let k = 0; k < ΔelemC; k++) {
const typeByte = view.getUint8(chunkOffset + 12 + k);
const incrCode = typeByte >> 4;
const incrType = codeToType[incrCode];
if (!incrType) throw new Error('Invalid incr type code');
ΔelemStride += typeToBytes[incrType.name];
}
let elemStride = 0;
for (let k = 0; k < elemC; k++) {
const typeCode = view.getUint8(chunkOffset + 12 + ΔelemC + k);
const type = codeToType[typeCode];
if (!type) throw new Error('Invalid elem type code');
elemStride += typeToBytes[type.name];
}
const recordStride = ΔelemStride + elemStride;
localOffset += cCount * recordStride;
}
// pad after record
while (localOffset % 4 !== 0) localOffset++;
switch (chunkType) {
case 0x11:
chunks.push(new DougalBinaryChunkSequential(this, chunkOffset, localOffset));
break;
case 0x12:
chunks.push(new DougalBinaryChunkInterleaved(this, chunkOffset, localOffset));
break;
default:
throw new Error('Invalid chunk type');
}
chunkOffset += localOffset;
}
bundleOffset += 4 + length;
}
return chunks;
}
/** Return a ByteArray containing all data from all
* chunks including reconstructed i, j and incremental
* values as follows:
*
* <i_0> <i_1> … <i_x> // i values (constant)
* <j_0> <j_1> … <j_x> // j values (j0 + Δj*i)
* <Δelem_0_0> <Δelem_0_1> … <Δelem_0_x> // reconstructed Δelem0 (uses baseType)
* <Δelem_1_0> <Δelem_1_1> … <Δelem_1_x> // reconstructed Δelem1
* …
   * <Δelem_y_0> <Δelem_y_1> … <Δelem_y_x> // reconstructed Δelem_y (last)
* <elem_0_0> <elem_0_1> … <elem_0_x> // First elem
* <elem_1_0> <elem_1_1> … <elem_1_x> // Second elem
* …
* <elem_z_0> <elem_z_1> … <elem_z_x> // Last elem
*
* It does not matter whether the underlying chunks are
* sequential or interleaved. This function will transform
* as necessary.
*
*/
getDataSequentially () {
const chunks = this.chunks();
if (chunks.length === 0) return new ArrayBuffer(0);
const firstChunk = chunks[0];
const ΔelemC = firstChunk.ΔelemCount;
const elemC = firstChunk.elemCount;
// Check consistency across chunks
for (const chunk of chunks) {
if (chunk.ΔelemCount !== ΔelemC || chunk.elemCount !== elemC) {
throw new Error('Inconsistent chunk structures');
}
}
// Get types from first chunk
const view = new DataView(firstChunk);
const ΔelemBaseTypes = [];
for (let k = 0; k < ΔelemC; k++) {
const typeByte = view.getUint8(12 + k);
const baseCode = typeByte & 0xF;
const baseType = codeToType[baseCode];
if (!baseType) throw new Error('Invalid base type code');
ΔelemBaseTypes.push(baseType);
}
const elemTypes = [];
for (let k = 0; k < elemC; k++) {
const typeCode = view.getUint8(12 + ΔelemC + k);
const type = codeToType[typeCode];
if (!type) throw new Error('Invalid elem type code');
elemTypes.push(type);
}
// Compute total records
const totalN = chunks.reduce((sum, c) => sum + c.jCount, 0);
// Compute sizes
const size_i = totalN * 2; // Uint16 for i
const size_j = totalN * 4; // Int32 for j
let size_Δelems = 0;
for (const t of ΔelemBaseTypes) {
size_Δelems += totalN * typeToBytes[t.name];
}
let size_elems = 0;
for (const t of elemTypes) {
size_elems += totalN * typeToBytes[t.name];
}
const totalSize = size_i + size_j + size_Δelems + size_elems;
const ab = new ArrayBuffer(totalSize);
const dv = new DataView(ab);
// Write i's
let off = 0;
for (const chunk of chunks) {
const i = chunk.i;
for (let idx = 0; idx < chunk.jCount; idx++) {
dv.setUint16(off, i, true);
off += 2;
}
}
// Write j's
off = size_i;
for (const chunk of chunks) {
const j0 = chunk.j0;
const Δj = chunk.Δj;
for (let idx = 0; idx < chunk.jCount; idx++) {
const j = j0 + idx * Δj;
dv.setInt32(off, j, true);
off += 4;
}
}
// Write Δelems
off = size_i + size_j;
for (let m = 0; m < ΔelemC; m++) {
const type = ΔelemBaseTypes[m];
const bytes = typeToBytes[type.name];
for (const chunk of chunks) {
const arr = chunk.Δelem(m);
for (let idx = 0; idx < chunk.jCount; idx++) {
writeTypedValue(dv, off, arr[idx], type);
off += bytes;
}
}
}
// Write elems
for (let m = 0; m < elemC; m++) {
const type = elemTypes[m];
const bytes = typeToBytes[type.name];
for (const chunk of chunks) {
const arr = chunk.elem(m);
for (let idx = 0; idx < chunk.jCount; idx++) {
writeTypedValue(dv, off, arr[idx], type);
off += bytes;
}
}
}
return ab;
}
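  // Usage sketch (variable names are illustrative, not part of any public API):
  //
  //   const bundle = DougalBinaryBundle.clone(someArrayBuffer);
  //   const flat = bundle.getDataSequentially();  // column-major layout described above
  //   const view = new DataView(flat);
  //   const firstI = view.getUint16(0, true);     // i values come first, as little-endian Uint16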
/** Return a ByteArray containing all data from all
* chunks including reconstructed i, j and incremental
* values, interleaved as follows:
*
* <i_0> <j_0> <Δelem_0_0> <Δelem_1_0> … <Δelem_y_0> <elem_0_0> <elem_1_0> … <elem_z_0>
* <i_1> <j_1> <Δelem_0_1> <Δelem_1_1> … <Δelem_y_1> <elem_0_1> <elem_1_1> … <elem_z_1>
   * …
   * <i_x> <j_x> <Δelem_0_x> <Δelem_1_x> … <Δelem_y_x> <elem_0_x> <elem_1_x> … <elem_z_x>
*
* It does not matter whether the underlying chunks are
* sequential or interleaved. This function will transform
* as necessary.
*
*/
getDataInterleaved () {
const chunks = this.chunks();
if (chunks.length === 0) return new ArrayBuffer(0);
const firstChunk = chunks[0];
const ΔelemC = firstChunk.ΔelemCount;
const elemC = firstChunk.elemCount;
// Check consistency across chunks
for (const chunk of chunks) {
if (chunk.ΔelemCount !== ΔelemC || chunk.elemCount !== elemC) {
throw new Error('Inconsistent chunk structures');
}
}
// Get types from first chunk
const view = new DataView(firstChunk);
const ΔelemBaseTypes = [];
for (let k = 0; k < ΔelemC; k++) {
const typeByte = view.getUint8(12 + k);
const baseCode = typeByte & 0xF;
const baseType = codeToType[baseCode];
if (!baseType) throw new Error('Invalid base type code');
ΔelemBaseTypes.push(baseType);
}
const elemTypes = [];
for (let k = 0; k < elemC; k++) {
const typeCode = view.getUint8(12 + ΔelemC + k);
const type = codeToType[typeCode];
if (!type) throw new Error('Invalid elem type code');
elemTypes.push(type);
}
// Compute total records
const totalN = chunks.reduce((sum, c) => sum + c.jCount, 0);
// Compute record size
const recordSize = 2 + 4 + // i (Uint16) + j (Int32)
ΔelemBaseTypes.reduce((sum, t) => sum + typeToBytes[t.name], 0) +
elemTypes.reduce((sum, t) => sum + typeToBytes[t.name], 0);
const totalSize = totalN * recordSize;
const ab = new ArrayBuffer(totalSize);
const dv = new DataView(ab);
let off = 0;
for (const chunk of chunks) {
const i = chunk.i;
const j0 = chunk.j0;
const Δj = chunk.Δj;
for (let idx = 0; idx < chunk.jCount; idx++) {
dv.setUint16(off, i, true);
off += 2;
const j = j0 + idx * Δj;
dv.setInt32(off, j, true);
off += 4;
for (let m = 0; m < ΔelemC; m++) {
const type = ΔelemBaseTypes[m];
const bytes = typeToBytes[type.name];
const arr = chunk.Δelem(m);
writeTypedValue(dv, off, arr[idx], type);
off += bytes;
}
for (let m = 0; m < elemC; m++) {
const type = elemTypes[m];
const bytes = typeToBytes[type.name];
const arr = chunk.elem(m);
writeTypedValue(dv, off, arr[idx], type);
off += bytes;
}
}
}
return ab;
}
get records () {
const data = [];
for (const record of this) {
data.push(record.slice(1));
}
return data;
}
[Symbol.iterator]() {
const chunks = this.chunks();
let chunkIndex = 0;
let chunkIterator = chunks.length > 0 ? chunks[0][Symbol.iterator]() : null;
return {
next() {
if (!chunkIterator) {
return { done: true };
}
let result = chunkIterator.next();
while (result.done && chunkIndex < chunks.length - 1) {
chunkIndex++;
chunkIterator = chunks[chunkIndex][Symbol.iterator]();
result = chunkIterator.next();
}
return result;
}
};
}
}
class DougalBinaryChunkSequential extends ArrayBuffer {
constructor (buffer, offset, length) {
super(length);
new Uint8Array(this).set(new Uint8Array(buffer, offset, length));
this._ΔelemCaches = new Array(this.ΔelemCount);
this._elemCaches = new Array(this.elemCount);
this._ΔelemBlockOffsets = null;
this._elemBlockOffsets = null;
this._recordOffset = null;
}
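/** Chunk header layout (12 bytes, little-endian), as summarised from the
 * getters below and the encoder:
 *
 *   byte  0      chunk type (0x11 sequential, 0x12 interleaved)
 *   byte  1      udv (user-defined value)
 *   bytes 2-3    jCount (Uint16)
 *   bytes 4-5    i (Uint16)
 *   bytes 6-7    j0 (Uint16)
 *   bytes 8-9    Δj (Int16)
 *   byte  10     Δelem_count
 *   byte  11     elem_count
 *
 * The header is followed by one type byte per Δelem (incr code in the high
 * nibble, base code in the low nibble), one type byte per elem, the initial
 * Δelem values and zero padding up to a 4-byte boundary.
 */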
_getRecordOffset() {
if (this._recordOffset !== null) return this._recordOffset;
const view = new DataView(this);
const ΔelemC = this.ΔelemCount;
const elemC = this.elemCount;
let recordOffset = 12 + ΔelemC + elemC;
for (let k = 0; k < ΔelemC; k++) {
const tb = view.getUint8(12 + k);
const bc = tb & 0xF;
const bt = codeToType[bc];
recordOffset += typeToBytes[bt.name];
}
while (recordOffset % 4 !== 0) recordOffset++;
this._recordOffset = recordOffset;
return recordOffset;
}
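/** In a sequential chunk the record data consists of one contiguous block of
 * increments per Δelem followed by one contiguous block of values per elem.
 * This precomputes the starting offset of every block.
 */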
_initBlockOffsets() {
if (this._ΔelemBlockOffsets !== null) return;
const view = new DataView(this);
const count = this.jCount;
const ΔelemC = this.ΔelemCount;
const elemC = this.elemCount;
const recordOffset = this._getRecordOffset();
this._ΔelemBlockOffsets = [];
let o = recordOffset;
for (let k = 0; k < ΔelemC; k++) {
this._ΔelemBlockOffsets[k] = o;
const tb = view.getUint8(12 + k);
const ic = tb >> 4;
const it = codeToType[ic];
o += count * typeToBytes[it.name];
}
this._elemBlockOffsets = [];
for (let k = 0; k < elemC; k++) {
this._elemBlockOffsets[k] = o;
const tc = view.getUint8(12 + ΔelemC + k);
const t = codeToType[tc];
o += count * typeToBytes[t.name];
}
}
/** Return the user-defined value
*/
get udv () {
return new DataView(this).getUint8(1);
}
/** Return the number of j elements in this chunk
*/
get jCount () {
return new DataView(this).getUint16(2, true);
}
/** Return the i value in this chunk
*/
get i () {
return new DataView(this).getUint16(4, true);
}
/** Return the j0 value in this chunk
*/
get j0 () {
return new DataView(this).getUint16(6, true);
}
/** Return the Δj value in this chunk
*/
get Δj () {
return new DataView(this).getInt16(8, true);
}
/** Return the Δelem_count value in this chunk
*/
get ΔelemCount () {
return new DataView(this).getUint8(10);
}
/** Return the elem_count value in this chunk
*/
get elemCount () {
return new DataView(this).getUint8(11);
}
/** Return a TypedArray (e.g., Uint16Array, …) for the n-th Δelem in the chunk
*/
Δelem (n) {
if (this._ΔelemCaches[n]) return this._ΔelemCaches[n];
if (n < 0 || n >= this.ΔelemCount) throw new Error(`Invalid Δelem index: ${n}`);
const view = new DataView(this);
const count = this.jCount;
const ΔelemC = this.ΔelemCount;
const typeByte = view.getUint8(12 + n);
const baseCode = typeByte & 0xF;
const incrCode = typeByte >> 4;
const baseType = codeToType[baseCode];
const incrType = codeToType[incrCode];
if (!baseType || !incrType) throw new Error('Invalid type codes for Δelem');
// Find offset for initial value of this Δelem
let initialOffset = 12 + ΔelemC + this.elemCount;
for (let k = 0; k < n; k++) {
const tb = view.getUint8(12 + k);
const bc = tb & 0xF;
const bt = codeToType[bc];
initialOffset += typeToBytes[bt.name];
}
let current = readTypedValue(view, initialOffset, baseType);
// Advance to start of record data (after all initials and pad)
const recordOffset = this._getRecordOffset();
// Find offset for deltas of this Δelem (skip previous Δelems' delta blocks)
this._initBlockOffsets();
const deltaOffset = this._ΔelemBlockOffsets[n];
// Reconstruct the array
const arr = new baseType(count);
const isBigInt = baseType === BigInt64Array || baseType === BigUint64Array;
arr[0] = current;
for (let idx = 1; idx < count; idx++) {
let delta = readTypedValue(view, deltaOffset + idx * typeToBytes[incrType.name], incrType);
if (isBigInt) {
delta = BigInt(delta);
current += delta;
} else {
current += delta;
}
arr[idx] = current;
}
this._ΔelemCaches[n] = arr;
return arr;
}
/** Return a TypedArray (e.g., Uint16Array, …) for the n-th elem in the chunk
*/
elem (n) {
if (this._elemCaches[n]) return this._elemCaches[n];
if (n < 0 || n >= this.elemCount) throw new Error(`Invalid elem index: ${n}`);
const view = new DataView(this);
const count = this.jCount;
const ΔelemC = this.ΔelemCount;
const elemC = this.elemCount;
const typeCode = view.getUint8(12 + ΔelemC + n);
const type = codeToType[typeCode];
if (!type) throw new Error('Invalid type code for elem');
// Find offset for this elem's data block
this._initBlockOffsets();
const elemOffset = this._elemBlockOffsets[n];
// Create and populate the array
const arr = new type(count);
const bytes = typeToBytes[type.name];
for (let idx = 0; idx < count; idx++) {
arr[idx] = readTypedValue(view, elemOffset + idx * bytes, type);
}
this._elemCaches[n] = arr;
return arr;
}
getRecord (index) {
if (index < 0 || index >= this.jCount) throw new Error(`Invalid record index: ${index}`);
const arr = [this.udv, this.i, this.j0 + index * this.Δj];
for (let m = 0; m < this.ΔelemCount; m++) {
const values = this.Δelem(m);
arr.push(values[index]);
}
for (let m = 0; m < this.elemCount; m++) {
const values = this.elem(m);
arr.push(values[index]);
}
return arr;
}
[Symbol.iterator]() {
let index = 0;
const chunk = this;
return {
next() {
if (index < chunk.jCount) {
return { value: chunk.getRecord(index++), done: false };
} else {
return { done: true };
}
}
};
}
}
class DougalBinaryChunkInterleaved extends ArrayBuffer {
constructor(buffer, offset, length) {
super(length);
new Uint8Array(this).set(new Uint8Array(buffer, offset, length));
this._incrStrides = [];
this._elemStrides = [];
this._incrOffsets = [];
this._elemOffsets = [];
this._recordStride = 0;
this._recordOffset = null;
this._initStrides();
this._ΔelemCaches = new Array(this.ΔelemCount);
this._elemCaches = new Array(this.elemCount);
}
_getRecordOffset() {
if (this._recordOffset !== null) return this._recordOffset;
const view = new DataView(this);
const ΔelemC = this.ΔelemCount;
const elemC = this.elemCount;
let recordOffset = 12 + ΔelemC + elemC;
for (let k = 0; k < ΔelemC; k++) {
const tb = view.getUint8(12 + k);
const bc = tb & 0xF;
const bt = codeToType[bc];
recordOffset += typeToBytes[bt.name];
}
while (recordOffset % 4 !== 0) recordOffset++;
this._recordOffset = recordOffset;
return recordOffset;
}
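/** In an interleaved chunk every record stores all Δelem increments followed
 * by all elem values; records repeat at a fixed stride. This precomputes the
 * per-record stride and each field's offset within a record.
 */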
_initStrides() {
const view = new DataView(this);
const ΔelemC = this.ΔelemCount;
const elemC = this.elemCount;
// Compute incr strides and offsets
let incrOffset = 0;
for (let k = 0; k < ΔelemC; k++) {
const typeByte = view.getUint8(12 + k);
const incrCode = typeByte >> 4;
const incrType = codeToType[incrCode];
if (!incrType) throw new Error('Invalid incr type code');
this._incrOffsets.push(incrOffset);
const bytes = typeToBytes[incrType.name];
this._incrStrides.push(bytes);
incrOffset += bytes;
this._recordStride += bytes;
}
// Compute elem strides and offsets
let elemOffset = incrOffset;
for (let k = 0; k < elemC; k++) {
const typeCode = view.getUint8(12 + ΔelemC + k);
const type = codeToType[typeCode];
if (!type) throw new Error('Invalid elem type code');
this._elemOffsets.push(elemOffset);
const bytes = typeToBytes[type.name];
this._elemStrides.push(bytes);
elemOffset += bytes;
this._recordStride += bytes;
}
}
get udv() {
return new DataView(this).getUint8(1);
}
get jCount() {
return new DataView(this).getUint16(2, true);
}
get i() {
return new DataView(this).getUint16(4, true);
}
get j0() {
return new DataView(this).getUint16(6, true);
}
get Δj() {
return new DataView(this).getInt16(8, true);
}
get ΔelemCount() {
return new DataView(this).getUint8(10);
}
get elemCount() {
return new DataView(this).getUint8(11);
}
Δelem(n) {
if (this._ΔelemCaches[n]) return this._ΔelemCaches[n];
if (n < 0 || n >= this.ΔelemCount) throw new Error(`Invalid Δelem index: ${n}`);
const view = new DataView(this);
const count = this.jCount;
const ΔelemC = this.ΔelemCount;
const typeByte = view.getUint8(12 + n);
const baseCode = typeByte & 0xF;
const incrCode = typeByte >> 4;
const baseType = codeToType[baseCode];
const incrType = codeToType[incrCode];
if (!baseType || !incrType) throw new Error('Invalid type codes for Δelem');
// Find offset for initial value of this Δelem
let initialOffset = 12 + ΔelemC + this.elemCount;
for (let k = 0; k < n; k++) {
const tb = view.getUint8(12 + k);
const bc = tb & 0xF;
const bt = codeToType[bc];
initialOffset += typeToBytes[bt.name];
}
let current = readTypedValue(view, initialOffset, baseType);
// Find offset to start of record data
const recordOffset = this._getRecordOffset();
// Use precomputed offset for this Δelem
const deltaOffset = recordOffset + this._incrOffsets[n];
// Reconstruct the array
const arr = new baseType(count);
const isBigInt = baseType === BigInt64Array || baseType === BigUint64Array;
arr[0] = current;
for (let idx = 1; idx < count; idx++) {
let delta = readTypedValue(view, deltaOffset + idx * this._recordStride, incrType);
if (isBigInt) {
delta = BigInt(delta);
current += delta;
} else {
current += delta;
}
arr[idx] = current;
}
this._ΔelemCaches[n] = arr;
return arr;
}
elem(n) {
if (this._elemCaches[n]) return this._elemCaches[n];
if (n < 0 || n >= this.elemCount) throw new Error(`Invalid elem index: ${n}`);
const view = new DataView(this);
const count = this.jCount;
const ΔelemC = this.ΔelemCount;
const typeCode = view.getUint8(12 + ΔelemC + n);
const type = codeToType[typeCode];
if (!type) throw new Error('Invalid type code for elem');
// Find offset to start of record data
const recordOffset = this._getRecordOffset();
// Use precomputed offset for this elem (relative to start of record data)
const elemOffset = recordOffset + this._elemOffsets[n];
// Create and populate the array
const arr = new type(count);
const bytes = typeToBytes[type.name];
for (let idx = 0; idx < count; idx++) {
arr[idx] = readTypedValue(view, elemOffset + idx * this._recordStride, type);
}
this._elemCaches[n] = arr;
return arr;
}
getRecord (index) {
if (index < 0 || index >= this.jCount) throw new Error(`Invalid record index: ${index}`);
const arr = [this.udv, this.i, this.j0 + index * this.Δj];
for (let m = 0; m < this.ΔelemCount; m++) {
const values = this.Δelem(m);
arr.push(values[index]);
}
for (let m = 0; m < this.elemCount; m++) {
const values = this.elem(m);
arr.push(values[index]);
}
return arr;
}
[Symbol.iterator]() {
let index = 0;
const chunk = this;
return {
next() {
if (index < chunk.jCount) {
return { value: chunk.getRecord(index++), done: false };
} else {
return { done: true };
}
}
};
}
}
module.exports = { DougalBinaryBundle, DougalBinaryChunkSequential, DougalBinaryChunkInterleaved }

View File

@@ -1,327 +0,0 @@
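/** Binary decoder
 *
 * Counterpart of the encoder: `sequential()` and `interleaved()` take a
 * Uint8Array holding one or more bundles and return an object of the form
 * { i, j: [...], Δelems: [ [...] ], elems: [ [...] ] }.
 *
 * Minimal usage sketch (the `bundle` variable is assumed to come from the
 * matching encoder function):
 *
 *   const decode = require('./decode');
 *   const data = decode.sequential(bundle);
 */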
const codeToType = {
0: Int8Array,
1: Uint8Array,
2: Int16Array,
3: Uint16Array,
4: Int32Array,
5: Uint32Array,
7: Float32Array,
8: Float64Array,
9: BigInt64Array,
10: BigUint64Array
};
const typeToBytes = {
Int8Array: 1,
Uint8Array: 1,
Int16Array: 2,
Uint16Array: 2,
Int32Array: 4,
Uint32Array: 4,
Float32Array: 4,
Float64Array: 8,
BigInt64Array: 8,
BigUint64Array: 8
};
function sequential(binary) {
if (!(binary instanceof Uint8Array) || binary.length < 4) {
throw new Error('Invalid binary input');
}
const view = new DataView(binary.buffer, binary.byteOffset, binary.byteLength);
let offset = 0;
// Initialize result (assuming single i value for simplicity; extend for multiple i values if needed)
const result = { i: null, j: [], Δelems: [], elems: [] };
// Process bundles
while (offset < binary.length) {
// Read bundle header
if (offset + 4 > binary.length) throw new Error('Incomplete bundle header');
const bundleHeader = view.getUint32(offset, true);
if ((bundleHeader & 0xFF) !== 0x1C) throw new Error('Invalid bundle marker');
const bundleLength = bundleHeader >> 8;
offset += 4;
const bundleEnd = offset + bundleLength;
if (bundleEnd > binary.length) throw new Error('Bundle length exceeds input size');
// Process chunks in bundle
while (offset < bundleEnd) {
// Read chunk header
if (offset + 12 > bundleEnd) throw new Error('Incomplete chunk header');
const chunkType = view.getUint8(offset);
if (chunkType !== 0x11) throw new Error(`Unsupported chunk type: ${chunkType}`);
offset += 1; // Skip chunk type
offset += 1; // Skip udv
const count = view.getUint16(offset, true); offset += 2;
if (count > 65535) throw new Error('Chunk count exceeds 65535');
const iValue = view.getUint16(offset, true); offset += 2;
const j0 = view.getUint16(offset, true); offset += 2;
const Δj = view.getInt16(offset, true); offset += 2;
const ΔelemCount = view.getUint8(offset++); // Δelem_count
const elemCount = view.getUint8(offset++); // elem_count
// Set i value (assuming all chunks share the same i)
if (result.i === null) result.i = iValue;
else if (result.i !== iValue) throw new Error('Multiple i values not supported');
// Read preface (element types)
const ΔelemTypes = [];
for (let i = 0; i < ΔelemCount; i++) {
if (offset >= bundleEnd) throw new Error('Incomplete Δelem types');
const typeByte = view.getUint8(offset++);
const baseCode = typeByte & 0x0F;
const incrCode = typeByte >> 4;
if (!codeToType[baseCode] || !codeToType[incrCode]) {
throw new Error(`Invalid type code in Δelem: ${typeByte}`);
}
ΔelemTypes.push({ baseType: codeToType[baseCode], incrType: codeToType[incrCode] });
}
const elemTypes = [];
for (let i = 0; i < elemCount; i++) {
if (offset >= bundleEnd) throw new Error('Incomplete elem types');
const typeCode = view.getUint8(offset++);
if (!codeToType[typeCode]) throw new Error(`Invalid type code in elem: ${typeCode}`);
elemTypes.push(codeToType[typeCode]);
}
// Initialize Δelems and elems arrays if first chunk
if (!result.Δelems.length && ΔelemCount > 0) {
result.Δelems = Array(ΔelemCount).fill().map(() => []);
}
if (!result.elems.length && elemCount > 0) {
result.elems = Array(elemCount).fill().map(() => []);
}
// Read initial values for Δelems
const initialValues = [];
for (const { baseType } of ΔelemTypes) {
if (offset + typeToBytes[baseType.name] > bundleEnd) {
throw new Error('Incomplete initial values');
}
initialValues.push(readTypedValue(view, offset, baseType));
offset += typeToBytes[baseType.name];
}
// Skip padding
while (offset % 4 !== 0) {
if (offset >= bundleEnd) throw new Error('Incomplete padding after initial values');
offset++;
}
// Reconstruct j values
for (let idx = 0; idx < count; idx++) {
result.j.push(j0 + idx * Δj);
}
// Read record data (non-interleaved)
for (let i = 0; i < ΔelemCount; i++) {
let current = initialValues[i];
const values = result.Δelems[i];
const incrType = ΔelemTypes[i].incrType;
const isBigInt = typeof current === 'bigint';
for (let idx = 0; idx < count; idx++) {
if (offset + typeToBytes[incrType.name] > bundleEnd) {
throw new Error('Incomplete Δelem data');
}
let delta = readTypedValue(view, offset, incrType);
if (idx === 0) {
values.push(isBigInt ? Number(current) : current);
} else {
if (isBigInt) {
delta = BigInt(delta);
current += delta;
values.push(Number(current));
} else {
current += delta;
values.push(current);
}
}
offset += typeToBytes[incrType.name];
}
}
for (let i = 0; i < elemCount; i++) {
const values = result.elems[i];
const type = elemTypes[i];
const isBigInt = type === BigInt64Array || type === BigUint64Array;
for (let idx = 0; idx < count; idx++) {
if (offset + typeToBytes[type.name] > bundleEnd) {
throw new Error('Incomplete elem data');
}
let value = readTypedValue(view, offset, type);
values.push(isBigInt ? Number(value) : value);
offset += typeToBytes[type.name];
}
}
// Skip padding
while (offset % 4 !== 0) {
if (offset >= bundleEnd) throw new Error('Incomplete padding after record data');
offset++;
}
}
}
return result;
}
function interleaved(binary) {
if (!(binary instanceof Uint8Array) || binary.length < 4) {
throw new Error('Invalid binary input');
}
const view = new DataView(binary.buffer, binary.byteOffset, binary.byteLength);
let offset = 0;
// Initialize result (assuming single i value for simplicity; extend for multiple i values if needed)
const result = { i: null, j: [], Δelems: [], elems: [] };
// Process bundles
while (offset < binary.length) {
// Read bundle header
if (offset + 4 > binary.length) throw new Error('Incomplete bundle header');
const bundleHeader = view.getUint32(offset, true);
if ((bundleHeader & 0xFF) !== 0x1C) throw new Error('Invalid bundle marker');
const bundleLength = bundleHeader >> 8;
offset += 4;
const bundleEnd = offset + bundleLength;
if (bundleEnd > binary.length) throw new Error('Bundle length exceeds input size');
// Process chunks in bundle
while (offset < bundleEnd) {
// Read chunk header
if (offset + 12 > bundleEnd) throw new Error('Incomplete chunk header');
const chunkType = view.getUint8(offset);
if (chunkType !== 0x12) throw new Error(`Unsupported chunk type: ${chunkType}`);
offset += 1; // Skip chunk type
offset += 1; // Skip udv
const count = view.getUint16(offset, true); offset += 2;
if (count > 65535) throw new Error('Chunk count exceeds 65535');
const iValue = view.getUint16(offset, true); offset += 2;
const j0 = view.getUint16(offset, true); offset += 2;
const Δj = view.getInt16(offset, true); offset += 2;
const ΔelemCount = view.getUint8(offset++); // Δelem_count
const elemCount = view.getUint8(offset++); // elem_count
// Set i value (assuming all chunks share the same i)
if (result.i === null) result.i = iValue;
else if (result.i !== iValue) throw new Error('Multiple i values not supported');
// Read preface (element types)
const ΔelemTypes = [];
for (let i = 0; i < ΔelemCount; i++) {
if (offset >= bundleEnd) throw new Error('Incomplete Δelem types');
const typeByte = view.getUint8(offset++);
const baseCode = typeByte & 0x0F;
const incrCode = typeByte >> 4;
if (!codeToType[baseCode] || !codeToType[incrCode]) {
throw new Error(`Invalid type code in Δelem: ${typeByte}`);
}
ΔelemTypes.push({ baseType: codeToType[baseCode], incrType: codeToType[incrCode] });
}
const elemTypes = [];
for (let i = 0; i < elemCount; i++) {
if (offset >= bundleEnd) throw new Error('Incomplete elem types');
const typeCode = view.getUint8(offset++);
if (!codeToType[typeCode]) throw new Error(`Invalid type code in elem: ${typeCode}`);
elemTypes.push(codeToType[typeCode]);
}
// Initialize Δelems and elems arrays if first chunk
if (!result.Δelems.length && ΔelemCount > 0) {
result.Δelems = Array(ΔelemCount).fill().map(() => []);
}
if (!result.elems.length && elemCount > 0) {
result.elems = Array(elemCount).fill().map(() => []);
}
// Read initial values for Δelems
const initialValues = [];
for (const { baseType } of ΔelemTypes) {
if (offset + typeToBytes[baseType.name] > bundleEnd) {
throw new Error('Incomplete initial values');
}
initialValues.push(readTypedValue(view, offset, baseType));
offset += typeToBytes[baseType.name];
}
// Skip padding
while (offset % 4 !== 0) {
if (offset >= bundleEnd) throw new Error('Incomplete padding after initial values');
offset++;
}
// Reconstruct j values
for (let idx = 0; idx < count; idx++) {
result.j.push(j0 + idx * Δj);
}
// Read interleaved record data
for (let idx = 0; idx < count; idx++) {
// Read Δelems
for (let i = 0; i < ΔelemCount; i++) {
const values = result.Δelems[i];
const incrType = ΔelemTypes[i].incrType;
const isBigInt = typeof initialValues[i] === 'bigint';
if (offset + typeToBytes[incrType.name] > bundleEnd) {
throw new Error('Incomplete Δelem data');
}
let delta = readTypedValue(view, offset, incrType);
offset += typeToBytes[incrType.name];
if (idx === 0) {
values.push(isBigInt ? Number(initialValues[i]) : initialValues[i]);
} else {
if (isBigInt) {
delta = BigInt(delta);
initialValues[i] += delta;
values.push(Number(initialValues[i]));
} else {
initialValues[i] += delta;
values.push(initialValues[i]);
}
}
}
// Read elems
for (let i = 0; i < elemCount; i++) {
const values = result.elems[i];
const type = elemTypes[i];
const isBigInt = type === BigInt64Array || type === BigUint64Array;
if (offset + typeToBytes[type.name] > bundleEnd) {
throw new Error('Incomplete elem data');
}
let value = readTypedValue(view, offset, type);
values.push(isBigInt ? Number(value) : value);
offset += typeToBytes[type.name];
}
}
// Skip padding
while (offset % 4 !== 0) {
if (offset >= bundleEnd) throw new Error('Incomplete padding after record data');
offset++;
}
}
}
return result;
}
function readTypedValue(view, offset, type) {
switch (type) {
case Int8Array: return view.getInt8(offset);
case Uint8Array: return view.getUint8(offset);
case Int16Array: return view.getInt16(offset, true);
case Uint16Array: return view.getUint16(offset, true);
case Int32Array: return view.getInt32(offset, true);
case Uint32Array: return view.getUint32(offset, true);
case Float32Array: return view.getFloat32(offset, true);
case Float64Array: return view.getFloat64(offset, true);
case BigInt64Array: return view.getBigInt64(offset, true);
case BigUint64Array: return view.getBigUint64(offset, true);
default: throw new Error(`Unsupported type: ${type.name}`);
}
}
module.exports = { sequential, interleaved };

View File

@@ -1,380 +0,0 @@
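/** Binary encoder
 *
 * `sequential()` and `interleaved()` pack an array of record objects into a
 * Uint8Array bundle, given getter functions for i and j plus descriptors for
 * delta-encoded and plain elements.
 *
 * Minimal usage sketch (json, i, j, deltas and elems as in the worked
 * example in this package's index.js):
 *
 *   const encode = require('./encode');
 *   const bundle = encode.sequential(json, i, j, deltas, elems);
 */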
const typeToCode = {
Int8Array: 0,
Uint8Array: 1,
Int16Array: 2,
Uint16Array: 3,
Int32Array: 4,
Uint32Array: 5,
Float32Array: 7, // Float16 not natively supported in JS, use Float32
Float64Array: 8,
BigInt64Array: 9,
BigUint64Array: 10
};
const typeToBytes = {
Int8Array: 1,
Uint8Array: 1,
Int16Array: 2,
Uint16Array: 2,
Int32Array: 4,
Uint32Array: 4,
Float32Array: 4,
Float64Array: 8,
BigInt64Array: 8,
BigUint64Array: 8
};
function sequential(json, iGetter, jGetter, Δelems = [], elems = [], udv = 0) {
if (!Array.isArray(json) || !json.length) return new Uint8Array(0);
if (typeof iGetter !== 'function' || typeof jGetter !== 'function') throw new Error('i and j must be getter functions');
Δelems.forEach((elem, idx) => {
if (typeof elem.key !== 'function') throw new Error(`Δelems[${idx}].key must be a getter function`);
});
elems.forEach((elem, idx) => {
if (typeof elem.key !== 'function') throw new Error(`elems[${idx}].key must be a getter function`);
});
// Group records by i value
const groups = new Map();
for (const record of json) {
const iValue = iGetter(record);
if (iValue == null) throw new Error('Missing i value from getter');
if (!groups.has(iValue)) groups.set(iValue, []);
groups.get(iValue).push(record);
}
const maxBundleSize = 0xFFFFFF; // Max bundle length (24 bits)
const buffers = [];
// Process each group (i value)
for (const [iValue, records] of groups) {
// Sort records by j to ensure consistent order
records.sort((a, b) => jGetter(a) - jGetter(b));
const jValues = records.map(jGetter);
if (jValues.some(v => v == null)) throw new Error('Missing j value from getter');
// Split records into chunks based on Δj continuity
const chunks = [];
let currentChunk = [records[0]];
let currentJ0 = jValues[0];
let currentΔj = records.length > 1 ? jValues[1] - jValues[0] : 0;
for (let idx = 1; idx < records.length; idx++) {
const chunkIndex = chunks.reduce((sum, c) => sum + c.records.length, 0);
const expectedJ = currentJ0 + (idx - chunkIndex) * currentΔj;
if (jValues[idx] !== expectedJ || idx - chunkIndex >= 65535) {
chunks.push({ records: currentChunk, j0: currentJ0, Δj: currentΔj });
currentChunk = [records[idx]];
currentJ0 = jValues[idx];
currentΔj = idx + 1 < records.length ? jValues[idx + 1] - jValues[idx] : 0;
} else {
currentChunk.push(records[idx]);
}
}
if (currentChunk.length > 0) {
chunks.push({ records: currentChunk, j0: currentJ0, Δj: currentΔj });
}
// Calculate total size for all chunks in this group by simulating offsets
const chunkSizes = chunks.map(({ records: chunkRecords }) => {
if (chunkRecords.length > 65535) throw new Error(`Chunk size exceeds 65535 for i=${iValue}`);
let simulatedOffset = 0; // Relative to chunk start
simulatedOffset += 12; // Header
simulatedOffset += Δelems.length + elems.length; // Preface
simulatedOffset += Δelems.reduce((sum, e) => sum + typeToBytes[e.baseType.name], 0); // Initial values
while (simulatedOffset % 4 !== 0) simulatedOffset++; // Pad after initial
simulatedOffset += chunkRecords.length * (
Δelems.reduce((sum, e) => sum + typeToBytes[e.incrType.name], 0) +
elems.reduce((sum, e) => sum + typeToBytes[e.type.name], 0)
); // Record data
while (simulatedOffset % 4 !== 0) simulatedOffset++; // Pad after record
return simulatedOffset;
});
const totalChunkSize = chunkSizes.reduce((sum, size) => sum + size, 0);
// Start a new bundle if needed
const lastBundle = buffers[buffers.length - 1];
if (!lastBundle || lastBundle.offset + totalChunkSize > maxBundleSize) {
buffers.push({ offset: 4, buffer: null, view: null });
}
// Initialize DataView for current bundle
const currentBundle = buffers[buffers.length - 1];
if (!currentBundle.buffer) {
const requiredSize = totalChunkSize + 4;
currentBundle.buffer = new ArrayBuffer(requiredSize);
currentBundle.view = new DataView(currentBundle.buffer);
}
// Process each chunk
for (const { records: chunkRecords, j0, Δj } of chunks) {
const chunkSize = chunkSizes.shift();
// Ensure buffer is large enough
if (currentBundle.offset + chunkSize > currentBundle.buffer.byteLength) {
const newSize = currentBundle.offset + chunkSize;
const newBuffer = new ArrayBuffer(newSize);
new Uint8Array(newBuffer).set(new Uint8Array(currentBundle.buffer));
currentBundle.buffer = newBuffer;
currentBundle.view = new DataView(newBuffer);
}
// Write chunk header
let offset = currentBundle.offset;
currentBundle.view.setUint8(offset++, 0x11); // Chunk type
currentBundle.view.setUint8(offset++, udv); // udv
currentBundle.view.setUint16(offset, chunkRecords.length, true); offset += 2; // count
currentBundle.view.setUint16(offset, iValue, true); offset += 2; // i
currentBundle.view.setUint16(offset, j0, true); offset += 2; // j0
currentBundle.view.setInt16(offset, Δj, true); offset += 2; // Δj
currentBundle.view.setUint8(offset++, Δelems.length); // Δelem_count
currentBundle.view.setUint8(offset++, elems.length); // elem_count
// Write chunk preface (element types)
for (const elem of Δelems) {
const baseCode = typeToCode[elem.baseType.name];
const incrCode = typeToCode[elem.incrType.name];
currentBundle.view.setUint8(offset++, (incrCode << 4) | baseCode);
}
for (const elem of elems) {
currentBundle.view.setUint8(offset++, typeToCode[elem.type.name]);
}
// Write initial values for Δelems
for (const elem of Δelems) {
const value = elem.key(chunkRecords[0]);
if (value == null) throw new Error('Missing Δelem value from getter');
writeTypedValue(currentBundle.view, offset, value, elem.baseType);
offset += typeToBytes[elem.baseType.name];
}
// Pad to 4-byte boundary
while (offset % 4 !== 0) currentBundle.view.setUint8(offset++, 0);
// Write record data (non-interleaved)
for (const elem of Δelems) {
let prev = elem.key(chunkRecords[0]);
for (let idx = 0; idx < chunkRecords.length; idx++) {
const value = idx === 0 ? 0 : elem.key(chunkRecords[idx]) - prev;
writeTypedValue(currentBundle.view, offset, value, elem.incrType);
offset += typeToBytes[elem.incrType.name];
prev = elem.key(chunkRecords[idx]);
}
}
for (const elem of elems) {
for (const record of chunkRecords) {
const value = elem.key(record);
if (value == null) throw new Error('Missing elem value from getter');
writeTypedValue(currentBundle.view, offset, value, elem.type);
offset += typeToBytes[elem.type.name];
}
}
// Pad to 4-byte boundary
while (offset % 4 !== 0) currentBundle.view.setUint8(offset++, 0);
// Update bundle offset
currentBundle.offset = offset;
}
// Update bundle header
currentBundle.view.setUint32(0, 0x1C | ((currentBundle.offset - 4) << 8), true);
}
// Combine buffers into final Uint8Array
const finalLength = buffers.reduce((sum, b) => sum + b.offset, 0);
const result = new Uint8Array(finalLength);
let offset = 0;
for (const { buffer, offset: bundleOffset } of buffers) {
result.set(new Uint8Array(buffer, 0, bundleOffset), offset);
offset += bundleOffset;
}
return result;
}
function interleaved(json, iGetter, jGetter, Δelems = [], elems = [], udv = 0) {
if (!Array.isArray(json) || !json.length) return new Uint8Array(0);
if (typeof iGetter !== 'function' || typeof jGetter !== 'function') throw new Error('i and j must be getter functions');
Δelems.forEach((elem, idx) => {
if (typeof elem.key !== 'function') throw new Error(`Δelems[${idx}].key must be a getter function`);
});
elems.forEach((elem, idx) => {
if (typeof elem.key !== 'function') throw new Error(`elems[${idx}].key must be a getter function`);
});
// Group records by i value
const groups = new Map();
for (const record of json) {
const iValue = iGetter(record);
if (iValue == null) throw new Error('Missing i value from getter');
if (!groups.has(iValue)) groups.set(iValue, []);
groups.get(iValue).push(record);
}
const maxBundleSize = 0xFFFFFF; // Max bundle length (24 bits)
const buffers = [];
// Process each group (i value)
for (const [iValue, records] of groups) {
// Sort records by j to ensure consistent order
records.sort((a, b) => jGetter(a) - jGetter(b));
const jValues = records.map(jGetter);
if (jValues.some(v => v == null)) throw new Error('Missing j value from getter');
// Split records into chunks based on Δj continuity
const chunks = [];
let currentChunk = [records[0]];
let currentJ0 = jValues[0];
let currentΔj = records.length > 1 ? jValues[1] - jValues[0] : 0;
for (let idx = 1; idx < records.length; idx++) {
const chunkIndex = chunks.reduce((sum, c) => sum + c.records.length, 0);
const expectedJ = currentJ0 + (idx - chunkIndex) * currentΔj;
if (jValues[idx] !== expectedJ || idx - chunkIndex >= 65535) {
chunks.push({ records: currentChunk, j0: currentJ0, Δj: currentΔj });
currentChunk = [records[idx]];
currentJ0 = jValues[idx];
currentΔj = idx + 1 < records.length ? jValues[idx + 1] - jValues[idx] : 0;
} else {
currentChunk.push(records[idx]);
}
}
if (currentChunk.length > 0) {
chunks.push({ records: currentChunk, j0: currentJ0, Δj: currentΔj });
}
// Calculate total size for all chunks in this group by simulating offsets
const chunkSizes = chunks.map(({ records: chunkRecords }) => {
if (chunkRecords.length > 65535) throw new Error(`Chunk size exceeds 65535 for i=${iValue}`);
let simulatedOffset = 0; // Relative to chunk start
simulatedOffset += 12; // Header
simulatedOffset += Δelems.length + elems.length; // Preface
simulatedOffset += Δelems.reduce((sum, e) => sum + typeToBytes[e.baseType.name], 0); // Initial values
while (simulatedOffset % 4 !== 0) simulatedOffset++; // Pad after initial
simulatedOffset += chunkRecords.length * (
Δelems.reduce((sum, e) => sum + typeToBytes[e.incrType.name], 0) +
elems.reduce((sum, e) => sum + typeToBytes[e.type.name], 0)
); // Interleaved record data
while (simulatedOffset % 4 !== 0) simulatedOffset++; // Pad after record
return simulatedOffset;
});
const totalChunkSize = chunkSizes.reduce((sum, size) => sum + size, 0);
// Start a new bundle if needed
const lastBundle = buffers[buffers.length - 1];
if (!lastBundle || lastBundle.offset + totalChunkSize > maxBundleSize) {
buffers.push({ offset: 4, buffer: null, view: null });
}
// Initialize DataView for current bundle
const currentBundle = buffers[buffers.length - 1];
if (!currentBundle.buffer) {
const requiredSize = totalChunkSize + 4;
currentBundle.buffer = new ArrayBuffer(requiredSize);
currentBundle.view = new DataView(currentBundle.buffer);
}
// Process each chunk
for (const { records: chunkRecords, j0, Δj } of chunks) {
const chunkSize = chunkSizes.shift();
// Ensure buffer is large enough
if (currentBundle.offset + chunkSize > currentBundle.buffer.byteLength) {
const newSize = currentBundle.offset + chunkSize;
const newBuffer = new ArrayBuffer(newSize);
new Uint8Array(newBuffer).set(new Uint8Array(currentBundle.buffer));
currentBundle.buffer = newBuffer;
currentBundle.view = new DataView(newBuffer);
}
// Write chunk header
let offset = currentBundle.offset;
currentBundle.view.setUint8(offset++, 0x12); // Chunk type
currentBundle.view.setUint8(offset++, udv); // udv
currentBundle.view.setUint16(offset, chunkRecords.length, true); offset += 2; // count
currentBundle.view.setUint16(offset, iValue, true); offset += 2; // i
currentBundle.view.setUint16(offset, j0, true); offset += 2; // j0
currentBundle.view.setInt16(offset, Δj, true); offset += 2; // Δj
currentBundle.view.setUint8(offset++, Δelems.length); // Δelem_count
currentBundle.view.setUint8(offset++, elems.length); // elem_count
// Write chunk preface (element types)
for (const elem of Δelems) {
const baseCode = typeToCode[elem.baseType.name];
const incrCode = typeToCode[elem.incrType.name];
currentBundle.view.setUint8(offset++, (incrCode << 4) | baseCode);
}
for (const elem of elems) {
currentBundle.view.setUint8(offset++, typeToCode[elem.type.name]);
}
// Write initial values for Δelems
for (const elem of Δelems) {
const value = elem.key(chunkRecords[0]);
if (value == null) throw new Error('Missing Δelem value from getter');
writeTypedValue(currentBundle.view, offset, value, elem.baseType);
offset += typeToBytes[elem.baseType.name];
}
// Pad to 4-byte boundary
while (offset % 4 !== 0) currentBundle.view.setUint8(offset++, 0);
// Write interleaved record data
const prevValues = Δelems.map(elem => elem.key(chunkRecords[0]));
for (let idx = 0; idx < chunkRecords.length; idx++) {
// Write Δelems increments
for (let i = 0; i < Δelems.length; i++) {
const elem = Δelems[i];
const value = idx === 0 ? 0 : elem.key(chunkRecords[idx]) - prevValues[i];
writeTypedValue(currentBundle.view, offset, value, elem.incrType);
offset += typeToBytes[elem.incrType.name];
prevValues[i] = elem.key(chunkRecords[idx]);
}
// Write elems
for (const elem of elems) {
const value = elem.key(chunkRecords[idx]);
if (value == null) throw new Error('Missing elem value from getter');
writeTypedValue(currentBundle.view, offset, value, elem.type);
offset += typeToBytes[elem.type.name];
}
}
// Pad to 4-byte boundary
while (offset % 4 !== 0) currentBundle.view.setUint8(offset++, 0);
// Update bundle offset
currentBundle.offset = offset;
}
// Update bundle header
currentBundle.view.setUint32(0, 0x1C | ((currentBundle.offset - 4) << 8), true);
}
// Combine buffers into final Uint8Array
const finalLength = buffers.reduce((sum, b) => sum + b.offset, 0);
const result = new Uint8Array(finalLength);
let offset = 0;
for (const { buffer, offset: bundleOffset } of buffers) {
result.set(new Uint8Array(buffer, 0, bundleOffset), offset);
offset += bundleOffset;
}
return result;
}
function writeTypedValue(view, offset, value, type) {
switch (type) {
case Int8Array: view.setInt8(offset, value); break;
case Uint8Array: view.setUint8(offset, value); break;
case Int16Array: view.setInt16(offset, value, true); break;
case Uint16Array: view.setUint16(offset, value, true); break;
case Int32Array: view.setInt32(offset, value, true); break;
case Uint32Array: view.setUint32(offset, value, true); break;
case Float32Array: view.setFloat32(offset, value, true); break;
case Float64Array: view.setFloat64(offset, value, true); break;
case BigInt64Array: view.setBigInt64(offset, BigInt(value), true); break;
case BigUint64Array: view.setBigUint64(offset, BigInt(value), true); break;
default: throw new Error(`Unsupported type: ${type.name}`);
}
}
module.exports = { sequential, interleaved };

View File

@@ -1,139 +0,0 @@
/** Binary encoder
*
* This module encodes scalar data from a grid-like source
* into a packed binary format for bandwidth efficiency and
* speed of access.
*
* Data are indexed by i & j values, with "i" being constant
* (e.g., a sequence or line number) and "j" expected to change
* by a constant, linear amount (e.g., point numbers). All data
* from consecutive "j" values will be encoded as a single array
* (or series of arrays if multiple values are encoded).
* If there is a jump in the "j" progression, a new "chunk" will
* be started with a new array (or series of arrays).
*
* Multiple values may be encoded per (i, j) pair, using any of
* the types supported by JavaScript's TypedArray except for
* Float16 and Uint8Clamped. Each variable can be encoded with
* a different size.
*
* Values may be encoded directly or as deltas from an initial
* value. The latter is particularly efficient when dealing with
* monotonically incrementing data, such as timestamps.
*
* The conceptual packet format for sequentially encoded data
* looks like this:
*
* <msg-type> <count: x> <i> <j0> <Δj>
*
* <Δelement_count: y>
* <element_count: z>
*
* <Δelement_1_type_base> … <Δelement_y_type_base>
* <Δelement_1_type_incr> … <Δelement_y_type_incr>
* <elem_1_type> … <elem_z_type>
*
* <Δelement_1_first> … <Δelement_y_first>
*
* <Δelem_1_0> … <Δelem_1_x>
* …
* <Δelem_y_0> … <Δelem_y_x>
* <elem_1_0> … <elem_1_x>
* …
* <elem_z_0> … <elem_z_x>
*
*
* The conceptual packet format for interleaved encoded data
* looks like this:
*
*
* <msg-type> <count: x> <i> <j0> <Δj>
*
* <Δelement_count: y>
* <element_count: z>
*
* <Δelement_1_type_base> … <Δelement_y_type_base>
* <Δelement_1_type_incr> … <Δelement_y_type_incr>
* <elem_1_type> … <elem_z_type>
*
* <Δelement_1_first> … <Δelement_y_first>
*
* <Δelem_1_0> <Δelem_2_0> … <Δelem_y_0> <elem_1_0> <elem_2_0> … <elem_z_0>
* <Δelem_1_1> <Δelem_2_1> … <Δelem_y_1> <elem_1_1> <elem_2_1> … <elem_z_1>
* …
* <Δelem_1_x> <Δelem_2_x> … <Δelem_y_x> <elem_1_x> <elem_2_x> … <elem_z_x>
*
*
* Usage example:
*
* json = [
* {
* sequence: 7,
* sailline: 5354,
* line: 5356,
* point: 1068,
* tstamp: 1695448704372,
* objrefraw: 3,
* objreffinal: 4
* },
* {
* sequence: 7,
* sailline: 5354,
* line: 5352,
* point: 1070,
* tstamp: 1695448693612,
* objrefraw: 2,
* objreffinal: 3
* },
* {
* sequence: 7,
* sailline: 5354,
* line: 5356,
* point: 1072,
* tstamp: 1695448684624,
* objrefraw: 3,
* objreffinal: 4
* }
* ];
*
* deltas = [
* { key: el => el.tstamp, baseType: BigUint64Array, incrType: Int16Array }
* ];
*
* elems = [
* { key: el => el.objrefraw, type: Uint8Array },
* { key: el => el.objreffinal, type: Uint8Array }
* ];
*
* i = el => el.sequence;
*
* j = el => el.point;
*
* bundle = encode(json, i, j, deltas, elems);
*
* // bundle:
*
* Uint8Array(40) [
* 36, 0, 0, 28, 17, 0, 3, 0, 7, 0,
* 44, 4, 2, 0, 1, 2, 42, 1, 1, 116,
* 37, 158, 192, 138, 1, 0, 0, 0, 0, 0,
* 248, 213, 228, 220, 3, 2, 3, 4, 3, 4
* ]
*
* decode(bundle);
*
* {
* i: 7,
* j: [ 1068, 1070, 1072 ],
* 'Δelems': [ [ 1695448704372, 1695448693612, 1695448684624 ] ],
* elems: [ [ 3, 2, 3 ], [ 4, 3, 4 ] ]
* }
*
*/
module.exports = {
encode: {...require('./encode')},
decode: {...require('./decode')},
...require('./classes')
};

View File

@@ -1,12 +0,0 @@
{
"name": "@dougal/binary",
"version": "1.0.0",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"keywords": [],
"author": "",
"license": "ISC",
"description": ""
}

View File

@@ -1,25 +0,0 @@
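/** Simple promise-based concurrency limiter.
 *
 * At most `maxConcurrent` tasks run at the same time; additional calls to
 * `enqueue()` wait in a FIFO queue until a slot is freed.
 *
 * Illustrative sketch (`urls` and `fetch` are placeholders, not part of this
 * module):
 *
 *   const ConcurrencyLimiter = require('@dougal/concurrency');
 *   const limiter = new ConcurrencyLimiter(3);
 *   const results = await Promise.all(
 *     urls.map(url => limiter.enqueue(() => fetch(url)))
 *   );
 */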
class ConcurrencyLimiter {
constructor(maxConcurrent) {
this.maxConcurrent = maxConcurrent;
this.active = 0;
this.queue = [];
}
async enqueue(task) {
if (this.active >= this.maxConcurrent) {
await new Promise(resolve => this.queue.push(resolve));
}
this.active++;
try {
return await task();
} finally {
this.active--;
if (this.queue.length > 0) {
this.queue.shift()();
}
}
}
}
module.exports = ConcurrencyLimiter;

View File

@@ -1,12 +0,0 @@
{
"name": "@dougal/concurrency",
"version": "1.0.0",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"keywords": [],
"author": "",
"license": "ISC",
"description": ""
}

View File

@@ -1,75 +0,0 @@
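/** A set of boolean operation flags (read, write, edit) for one organisation.
 *
 * Unknown flags are routed through the Proxy into `other` and coerced to
 * booleans on assignment. Illustrative sketch:
 *
 *   const a = new Organisation({ read: true, write: true });
 *   const b = new Organisation({ read: true, edit: true });
 *   a.filter(b).toJSON();   // { read: true, write: false, edit: false }
 */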
class Organisation {
constructor (data) {
this.read = !!data?.read;
this.write = !!data?.write;
this.edit = !!data?.edit;
this.other = {};
return new Proxy(this, {
get (target, prop) {
if (prop in target) {
return target[prop]
} else {
return target.other[prop];
}
},
set (target, prop, value) {
const oldValue = target[prop] !== undefined ? target[prop] : target.other[prop];
const newValue = Boolean(value);
if (["read", "write", "edit"].includes(prop)) {
target[prop] = newValue;
} else {
target.other[prop] = newValue;
}
return true;
}
});
}
toJSON () {
return {
read: this.read,
write: this.write,
edit: this.edit,
...this.other
}
}
toString (replacer, space) {
return JSON.stringify(this.toJSON(), replacer, space);
}
/** Limit the operations to only those allowed by `other`
*/
filter (other) {
const filteredOrganisation = new Organisation();
filteredOrganisation.read = this.read && other.read;
filteredOrganisation.write = this.write && other.write;
filteredOrganisation.edit = this.edit && other.edit;
return filteredOrganisation;
}
intersect (other) {
return this.filter(other);
}
}
if (typeof module !== 'undefined' && module.exports) {
module.exports = Organisation; // CJS export
}
// ESM export
if (typeof exports !== 'undefined' && !exports.default) {
exports.default = Organisation; // ESM export
}

View File

@@ -1,225 +0,0 @@
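/** A named collection of Organisation permission sets.
 *
 * An optional overlord caps what may be granted: set() filters every entry
 * through the overlord's rights for that organisation (or its "*" wildcard).
 * Illustrative sketch (organisation names are made up):
 *
 *   const orgs = new Organisations({ acme: { read: true, write: true, edit: true } });
 *   orgs.get("ACME").write;                    // true (names match case-insensitively)
 *   orgs.accessToOperation("edit").names();    // [ "acme" ]
 */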
const Organisation = require('./Organisation');
class Organisations {
#values = {}
#overlord
static entries (orgs) {
return orgs.names().map(name => [name, orgs.get(name)]);
}
constructor (data, overlord) {
if (data instanceof Organisations) {
for (const [name, value] of Organisations.entries(data)) {
this.set(name, new Organisation(value));
}
} else if (data instanceof Object) {
for (const [name, value] of Object.entries(data)) {
this.set(name, new Organisation(value));
}
} else if (typeof data === "string") {
this.set(data, new Organisation());
} else if (typeof data !== "undefined") {
throw new Error("Invalid constructor argument");
}
if (overlord) {
this.#overlord = overlord;
}
}
get values () {
return this.#values;
}
get length () {
return this.names().length;
}
get overlord () {
return this.#overlord;
}
set overlord (v) {
this.#overlord = new Organisations(v);
}
/** Get the operations for `name`
*/
get (name) {
const key = Object.keys(this.values).find( k => k.toLowerCase() == name.toLowerCase() ) ?? name;
return this.values[key];
}
/** Set the operations for `name` to `value`
*
* If we have an overlord, ensure we cannot:
*
* 1. Add new organisations which the overlord
* is not a member of
* 2. Access operations that the overlord is not
* allowed to access
*/
set (name, value) {
name = String(name).trim();
const key = Object.keys(this.values).find( k => k.toLowerCase() == name.toLowerCase() ) ?? name;
const org = new Organisation(value);
if (this.overlord) {
const parent = this.overlord.get(key) ?? this.overlord.get("*");
if (parent) {
this.values[key] = parent.filter(org);
}
} else {
this.values[key] = new Organisation(value);
}
return this;
}
/** Enable the operation `op` in all organisations
*/
enableOperation (op) {
if (this.overlord) {
Object.keys(this.#values)
.filter( key => (this.overlord.get(key) ?? this.overlord.get("*"))?.[op] )
.forEach( key => this.#values[key][op] = true );
} else {
Object.values(this.#values).forEach( org => org[op] = true );
}
return this;
}
/** Disable the operation `op` in all organisations
*/
disableOperation (op) {
Object.values(this.#values).forEach( org => org[op] = false );
return this;
}
/** Create a new organisation object limited by the caller's rights
*
* The spawned Organisations instance will have the same organisations
* and rights as the caller minus the applied `mask`. With the default
* mask, the spawned object will inherit all rights except for `edit`
* rights.
*
* The "*" organisation must be explicitly assigned. It is not inherited.
*/
spawn (mask = {read: true, write: true, edit: false}) {
const parent = new Organisations();
const wildcard = this.get("*").edit; // If true, we can spawn everywhere
this.entries().forEach( ([k, v]) => {
// if (k != "*") { // This organisation is not inherited
if (v.edit || wildcard) { // We have the right to spawn in this organisation
const o = new Organisation({
read: v.read && mask.read,
write: v.write && mask.write,
edit: v.edit && mask.edit
});
parent.set(k, o);
}
// }
});
return new Organisations({}, parent);
}
remove (name) {
const key = Object.keys(this.values).find( k => k.toLowerCase() == name.toLowerCase() ) ?? name;
delete this.values[key];
}
/** Return the list of organisation names
*/
names () {
return Object.keys(this.values);
}
/** Same as this.get(name)
*/
value (name) {
return this.values[name];
}
/** Same as Object.prototype.entries
*/
entries () {
return this.names().map( name => [ name, this.value(name) ] );
}
/** Return true if the named organisation is present
*/
has (name) {
return Boolean(this.value(name));
}
/** Return only those of our organisations
* and operations present in `other`
*/
filter (other) {
const filteredOrganisations = new Organisations();
const wildcard = other.value("*");
for (const [name, org] of this.entries()) {
const ownOrg = other.value(name) ?? wildcard;
if (ownOrg) {
filteredOrganisations.set(name, org.filter(ownOrg))
}
}
return filteredOrganisations;
}
/** Return only those organisations
* that have access to the required
* operation
*/
accessToOperation (op) {
const filteredOrganisations = new Organisations();
for (const [name, org] of this.entries()) {
if (org[op]) {
filteredOrganisations.set(name, org);
}
}
return filteredOrganisations;
}
toJSON () {
const obj = {};
for (const key in this.values) {
obj[key] = this.values[key].toJSON();
}
return obj;
}
toString (replacer, space) {
return JSON.stringify(this.toJSON(), replacer, space);
}
*[Symbol.iterator] () {
for (const [name, operations] of this.entries()) {
yield {name, operations};
}
}
}
if (typeof module !== 'undefined' && module.exports) {
module.exports = Organisations; // CJS export
}
// ESM export
if (typeof exports !== 'undefined' && !exports.default) {
exports.default = Organisations; // ESM export
}

View File

@@ -1,5 +0,0 @@
module.exports = {
Organisation: require('./Organisation'),
Organisations: require('./Organisations')
}

View File

@@ -1,12 +0,0 @@
{
"name": "@dougal/organisations",
"version": "1.0.0",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"keywords": [],
"author": "",
"license": "ISC",
"description": ""
}

View File

@@ -1,364 +0,0 @@
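/** A user record with identity, validation and organisation-based access
 * control. Access checks walk the user's organisations; a "*" entry acts as
 * a wildcard.
 *
 * Illustrative sketch (names and organisations are made up):
 *
 *   const admin = new User({ name: "Admin",
 *                            organisations: { "*": { read: true, write: true, edit: true } } });
 *   const guest = new User({ name: "Guest",
 *                            organisations: { crew: { read: true } } });
 *   admin.canEdit(guest);   // true  (wildcard edit right)
 *   guest.canRead(admin);   // false (no organisation in common)
 */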
const EventEmitter = require('events');
const { Organisations } = require('@dougal/organisations');
function randomUUID () {
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
const r = Math.random() * 16 | 0;
const v = c === 'x' ? r : (r & 0x3 | 0x8);
return v.toString(16);
});
}
class User extends EventEmitter {
// Valid field names
static fields = [ "ip", "host", "name", "email", "description", "colour", "active", "organisations", "meta" ]
static validUUID (str) {
const uuidv4Rx = /^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i;
return uuidv4Rx.test(str);
}
static validIPv4 (str) {
const ipv4Rx = /^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\/([0-9]|[1-2][0-9]|3[0-2]))?$/;
return ipv4Rx.test(str);
}
static validIPv6 (str) {
const ipv6Rx = /^(?:[0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?::[0-9a-fA-F]{1,4}){1,6}|:((?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4][0-9]|[01]?[0-9][0-9]?))|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4][0-9]|[01]?[0-9][0-9]?))))$/;
return ipv6Rx.test(str);
}
static validHostname (str) {
const hostnameRx = /^(?=.{1,253}$)(?:(?!-)[A-Za-z0-9-]{1,63}(?<!-)\.)+[A-Za-z]{2,}$/;
return hostnameRx.test(str);
}
#setString (k, v) {
if (typeof v === "undefined") {
this.values[k] = v;
} else {
this.values[k] = String(v).trim();
}
this.emit("changed", k, v);
this.#updateTimestamp();
}
#updateTimestamp (v) {
if (typeof v === "undefined") {
this.#timestamp = (new Date()).valueOf();
} else {
this.#timestamp = (new Date(v)).valueOf();
}
this.emit("last_modified", this.#timestamp);
}
// Create a new instance of `other`, where `other` is
// an instance of User or of a derived class
#clone (other = this) {
const clone = new this.constructor();
Object.assign(clone.values, other.values);
clone.organisations = new Organisations(other.organisations);
return clone;
}
values = {}
#timestamp
constructor (data) {
super();
User.fields.forEach( f => this[f] = data?.[f] );
this.values.id = data?.id ?? randomUUID();
this.values.active = !!this.active;
this.values.hash = data?.hash;
this.values.password = data?.password;
this.values.organisations = new Organisations(data?.organisations);
this.#updateTimestamp(data?.last_modified);
}
/*
* Getters
*/
get id () { return this.values.id }
get ip () { return this.values.ip }
get host () { return this.values.host }
get name () { return this.values.name }
get email () { return this.values.email }
get description () { return this.values.description }
get colour () { return this.values.colour }
get active () { return this.values.active }
get organisations () { return this.values.organisations }
get password () { return this.values.password }
get timestamp () { return new Date(this.#timestamp) }
/*
* Setters
*/
set id (v) {
if (typeof v === "undefined") {
this.values.id = randomUUID();
} else if (User.validUUID(v)) {
this.values.id = v;
} else {
throw new Error("Invalid ID format (must be UUIDv4)");
}
this.emit("changed", "id", this.values.id);
this.#updateTimestamp();
}
set ip (v) {
if (User.validIPv4(v) || User.validIPv6(v) || typeof v === "undefined") {
this.values.ip = v;
} else {
throw new Error("Invalid IP address or subnet");
}
this.emit("changed", "ip", this.values.ip);
this.#updateTimestamp();
}
set host (v) {
if (User.validHostname(v) || typeof v === "undefined") {
this.values.host = v;
} else {
throw new Error("Invalid hostname");
}
this.emit("changed", "host", this.values.host);
this.#updateTimestamp();
}
set name (v) {
this.#setString("name", v);
}
set email (v) {
// TODO should validate, but hey!
this.#setString("email", v);
}
set description (v) {
this.#setString("description", v);
}
set colour (v) {
this.#setString("colour", v);
}
set active (v) {
this.values.active = !!v;
this.emit("changed", "active", this.values.active);
this.#updateTimestamp();
}
set organisations (v) {
this.values.organisations = new Organisations(v);
this.emit("changed", "organisations", this.values.organisations);
this.#updateTimestamp();
}
set password (v) {
this.values.password = v;
this.emit("changed", "password", this.values.password);
this.#updateTimestamp();
}
/*
* Validation methods
*/
get errors () {
let err = [];
if (!this.id) err.push("ERR_NO_ID");
if (!this.name) err.push("ERR_NO_NAME");
if (!this.organisations.length) err.push("ERR_NO_ORG");
return err;
}
get isValid () {
return this.errors.length == 0;
}
/*
* Filtering methods
*/
filter (other) {
// const filteredUser = new User(this);
const filteredUser = this.#clone();
filteredUser.organisations = this.organisations.filter(other.organisations);
return filteredUser;
}
/** Return users that are visible to me.
*
* These are users with whom we have at least one organisation
* in common to which we hold read, write or edit access.
*
* If we are wildcarded ("*"), we see everyone.
*
* If a peer is wildcarded, they can be seen by everyone.
*/
peers (list) {
if (this.organisations.value("*")) {
return list;
} else {
return list.filter( user => this.canRead(user) );
// return list.filter( user =>
// user.organisations.value("*") ||
// user.organisations.filter(this.organisations).length > 0
// this.organisations.filter(user.organisations).length > 0
// );
}
}
/** Return users that I can edit
*
* These users must belong to an organisation
* over which I have edit rights.
*
* If we are edit wildcarded, we can edit everyone.
*/
editablePeers (list) {
const editableOrgs = this.organisations.accessToOperation("edit");
if (editableOrgs.value("*")) {
return list;
} else {
return list.filter( user => this.canEdit(user) );
// editableOrgs.filter(user.organisations).length > 0
// );
}
}
/*
* General methods
*/
/** Return `true` if we are `other`
*/
is (other) {
return this.id == other.id;
}
canDo (operation, other) {
if (this.organisations.get('*')?.[operation])
return true;
if (other instanceof User) {
return other.organisations.names().some(name => this.organisations.get(name)?.[operation]);
} else if (other instanceof Organisations) {
return other.accessToOperation(operation).names().some(name => this.organisations.get(name)?.[operation]);
} else if (other?.organisations) {
return this.canDo(operation, new Organisations(other.organisations));
} else if (other instanceof Object) {
return this.canDo(operation, new Organisations(other));
}
return false;
}
canRead (other) {
return this.canDo("read", other);
}
canWrite (other) {
return this.canDo("write", other);
}
canEdit (other) {
return this.canDo("edit", other);
}
/** Perform an edit on another user
*
* Syntax: user.edit(other).to(another);
*
* Applies to `other` the changes described in `another`
* that are permitted to `user`. The argument `another`
* must be a plain object (not a `User` instance) with
* only the properties that are to be changed.
*
* NOTE: Organisations are not merged, they are overwritten
* and then filtered to ensure that the edited user does not
* gain more privileges than those granted to the editing
* user.
*
* Example:
*
* // This causes user test77 to set user x23 to
* // inactive
* test77.edit(x23).to({active: false})
*/
edit (other) {
if (this.canEdit(other)) {
return {
to: (another) => {
const newUser = Object.assign(this.#clone(other), another);
return newUser.filter(this);
}
}
}
// Do not fail or throw but return undefined
}
/** Create a new user similar to us except it doesn't have `edit` rights
* by default
*/
spawn (init = {}, mask = {read: true, write: true, edit: false}) {
// const user = new User(init);
const user = this.#clone(init);
user.organisations = this.organisations.accessToOperation("edit").disableOperation("edit");
user.organisations.overlord = this.organisations;
return user;
}
/*
* Conversion and presentation methods
*/
toJSON () {
return {
id: this.id,
ip: this.ip,
host: this.host,
name: this.name,
email: this.email,
description: this.description,
colour: this.colour,
active: this.active,
organisations: this.organisations.toJSON(),
password: this.password
}
}
toString (replacer, space) {
return JSON.stringify(this.toJSON(), replacer, space);
}
}
if (typeof module !== 'undefined' && module.exports) {
module.exports = User; // CJS export
}
// ESM export
if (typeof exports !== 'undefined' && !exports.default) {
exports.default = User; // ESM export
}

View File

@@ -1,4 +0,0 @@
module.exports = {
User: require('./User')
}

Some files were not shown because too many files have changed in this diff.