diff --git a/bin/human_exports_qc.py b/bin/human_exports_qc.py
index fa4552b..65a0dd6 100755
--- a/bin/human_exports_qc.py
+++ b/bin/human_exports_qc.py
@@ -59,7 +59,7 @@ def qc_data (cursor, prefix):
else:
print("No QC data found");
return
-
+
#print("QC", qc)
index = 0
for item in qc["results"]:
diff --git a/bin/human_exports_seis.py b/bin/human_exports_seis.py
index 075dfa4..25118e3 100755
--- a/bin/human_exports_seis.py
+++ b/bin/human_exports_seis.py
@@ -39,7 +39,7 @@ def seis_data (survey):
if not pathlib.Path(pathPrefix).exists():
print(pathPrefix)
raise ValueError("Export path does not exist")
-
+
print(f"Requesting sequences for {survey['id']}")
url = f"http://localhost:3000/api/project/{survey['id']}/sequence"
r = requests.get(url)
@@ -47,12 +47,12 @@ def seis_data (survey):
for sequence in r.json():
if sequence['status'] not in ["final", "ntbp"]:
continue
-
+
filename = pathlib.Path(pathPrefix, "sequence{:0>3d}.json".format(sequence['sequence']))
if filename.exists():
print(f"Skipping export for sequence {sequence['sequence']} – file already exists")
continue
-
+
print(f"Processing sequence {sequence['sequence']}")
url = f"http://localhost:3000/api/project/{survey['id']}/event?sequence={sequence['sequence']}&missing=t"
headers = { "Accept": "application/vnd.seis+json" }
diff --git a/bin/import_final_p111.py b/bin/import_final_p111.py
index 90cbbb0..4056924 100755
--- a/bin/import_final_p111.py
+++ b/bin/import_final_p111.py
@@ -19,7 +19,7 @@ from datastore import Datastore
def add_pending_remark(db, sequence):
     text = '\nMarked as PENDING.\n'
-
+
with db.conn.cursor() as cursor:
qry = "SELECT remarks FROM raw_lines WHERE sequence = %s;"
cursor.execute(qry, (sequence,))
@@ -33,7 +33,7 @@ def add_pending_remark(db, sequence):
db.maybe_commit()
def del_pending_remark(db, sequence):
-
+
with db.conn.cursor() as cursor:
qry = "SELECT remarks FROM raw_lines WHERE sequence = %s;"
cursor.execute(qry, (sequence,))
@@ -89,12 +89,12 @@ if __name__ == '__main__':
pending = pendingRx.search(filepath) is not None
if not db.file_in_db(filepath):
-
+
age = time.time() - os.path.getmtime(filepath)
if age < file_min_age:
print("Skipping file because too new", filepath)
continue
-
+
print("Importing")
match = rx.match(os.path.basename(filepath))
@@ -106,7 +106,7 @@ if __name__ == '__main__':
file_info = dict(zip(pattern["captures"], match.groups()))
file_info["meta"] = {}
-
+
if pending:
print("Skipping / removing final file because marked as PENDING", filepath)
db.del_sequence_final(file_info["sequence"])
diff --git a/bin/import_final_p190.py b/bin/import_final_p190.py
index 278ca31..083790e 100755
--- a/bin/import_final_p190.py
+++ b/bin/import_final_p190.py
@@ -51,12 +51,12 @@ if __name__ == '__main__':
print(f"Found {filepath}")
if not db.file_in_db(filepath):
-
+
age = time.time() - os.path.getmtime(filepath)
if age < file_min_age:
print("Skipping file because too new", filepath)
continue
-
+
print("Importing")
match = rx.match(os.path.basename(filepath))
diff --git a/bin/import_preplots.py b/bin/import_preplots.py
index fda9883..8c89cee 100755
--- a/bin/import_preplots.py
+++ b/bin/import_preplots.py
@@ -31,12 +31,12 @@ if __name__ == '__main__':
for file in survey["preplots"]:
print(f"Preplot: {file['path']}")
if not db.file_in_db(file["path"]):
-
+
age = time.time() - os.path.getmtime(file["path"])
if age < file_min_age:
print("Skipping file because too new", file["path"])
continue
-
+
print("Importing")
try:
preplot = preplots.from_file(file)
diff --git a/bin/import_raw_p111.py b/bin/import_raw_p111.py
index 00c15ac..bd59696 100755
--- a/bin/import_raw_p111.py
+++ b/bin/import_raw_p111.py
@@ -59,12 +59,12 @@ if __name__ == '__main__':
ntbp = False
if not db.file_in_db(filepath):
-
+
age = time.time() - os.path.getmtime(filepath)
if age < file_min_age:
print("Skipping file because too new", filepath)
continue
-
+
print("Importing")
match = rx.match(os.path.basename(filepath))
diff --git a/bin/import_raw_p190.py b/bin/import_raw_p190.py
index 141e5ee..8e9ebaf 100755
--- a/bin/import_raw_p190.py
+++ b/bin/import_raw_p190.py
@@ -54,12 +54,12 @@ if __name__ == '__main__':
print(f"Found {filepath}")
if not db.file_in_db(filepath):
-
+
age = time.time() - os.path.getmtime(filepath)
if age < file_min_age:
print("Skipping file because too new", filepath)
continue
-
+
print("Importing")
match = rx.match(os.path.basename(filepath))
diff --git a/bin/import_smsrc.py b/bin/import_smsrc.py
index a6b7ba8..8b5dc83 100755
--- a/bin/import_smsrc.py
+++ b/bin/import_smsrc.py
@@ -55,12 +55,12 @@ if __name__ == '__main__':
print(f"Found {filepath}")
if not db.file_in_db(filepath):
-
+
age = time.time() - os.path.getmtime(filepath)
if age < file_min_age:
print("Skipping file because too new", filepath)
continue
-
+
print("Importing")
match = rx.match(os.path.basename(filepath))
diff --git a/bin/insert_event.py b/bin/insert_event.py
index 6a243c4..fd3e022 100755
--- a/bin/insert_event.py
+++ b/bin/insert_event.py
@@ -14,7 +14,7 @@ def detect_schema (conn):
if __name__ == '__main__':
import argparse
-
+
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--schema", required=False, default=None, help="survey where to insert the event")
ap.add_argument("-t", "--tstamp", required=False, default=None, help="event timestamp")
@@ -30,16 +30,16 @@ if __name__ == '__main__':
schema = args["schema"]
else:
schema = detect_schema(db.conn)
-
+
if args["tstamp"]:
tstamp = args["tstamp"]
else:
tstamp = datetime.utcnow().isoformat()
-
+
message = " ".join(args["remarks"])
-
+
print("new event:", schema, tstamp, message)
-
+
if schema and tstamp and message:
db.set_survey(schema)
with db.conn.cursor() as cursor:
diff --git a/bin/p190.py b/bin/p190.py
index ca579a5..33313a4 100644
--- a/bin/p190.py
+++ b/bin/p190.py
@@ -12,7 +12,7 @@ from parse_fwr import parse_fwr
def parse_p190_header (string):
"""Parse a generic P1/90 header record.
-
+
Returns a dictionary of fields.
"""
names = [ "record_type", "header_type", "header_type_modifier", "description", "data" ]
@@ -27,7 +27,7 @@ def parse_p190_type1 (string):
"doy", "time", "spare2" ]
record = parse_fwr(string, [1, 12, 3, 1, 1, 1, 6, 10, 11, 9, 9, 6, 3, 6, 1])
return dict(zip(names, record))
-
+
def parse_p190_rcv_group (string):
"""Parse a P1/90 Type 1 receiver group record."""
names = [ "record_type",
@@ -37,7 +37,7 @@ def parse_p190_rcv_group (string):
"streamer_id" ]
record = parse_fwr(string, [1, 4, 9, 9, 4, 4, 9, 9, 4, 4, 9, 9, 4, 1])
return dict(zip(names, record))
-
+
def parse_line (string):
type = string[0]
if string[:3] == "EOF":
@@ -52,7 +52,7 @@ def parse_line (string):
def p190_type(type, records):
return [ r for r in records if r["record_type"] == type ]
-
+
def p190_header(code, records):
return [ h for h in p190_type("H", records) if h["header_type"]+h["header_type_modifier"] == code ]
@@ -86,15 +86,15 @@ def normalise_record(record):
# These are probably strings
elif "strip" in dir(record[key]):
record[key] = record[key].strip()
-
+
return record
-
+
def normalise(records):
for record in records:
normalise_record(record)
-
+
return records
-
+
def from_file(path, only_records=None, shot_range=None, with_objrefs=False):
records = []
with open(path) as fd:
@@ -102,10 +102,10 @@ def from_file(path, only_records=None, shot_range=None, with_objrefs=False):
line = fd.readline()
while line:
cnt = cnt + 1
-
+
if line == "EOF":
break
-
+
record = parse_line(line)
if record is not None:
if only_records:
@@ -121,9 +121,9 @@ def from_file(path, only_records=None, shot_range=None, with_objrefs=False):
records.append(record)
line = fd.readline()
-
+
return records
-
+
def apply_tstamps(recordset, tstamp=None, fix_bad_seconds=False):
#print("tstamp", tstamp, type(tstamp))
if type(tstamp) is int:
@@ -161,16 +161,16 @@ def apply_tstamps(recordset, tstamp=None, fix_bad_seconds=False):
record["tstamp"] = ts
prev[object_id(record)] = doy
break
-
+
return recordset
-
+
def dms(value):
# 591544.61N
hemisphere = 1 if value[-1] in "NnEe" else -1
seconds = float(value[-6:-1])
minutes = int(value[-8:-6])
degrees = int(value[:-8])
-
+
return (degrees + minutes/60 + seconds/3600) * hemisphere
def tod(record):
@@ -183,7 +183,7 @@ def tod(record):
m = int(time[2:4])
s = float(time[4:])
return d*86400 + h*3600 + m*60 + s
-
+
def duration(record0, record1):
ts0 = tod(record0)
ts1 = tod(record1)
@@ -198,10 +198,10 @@ def azimuth(record0, record1):
x0, y0 = float(record0["easting"]), float(record0["northing"])
x1, y1 = float(record1["easting"]), float(record1["northing"])
return math.degrees(math.atan2(x1-x0, y1-y0)) % 360
-
+
def speed(record0, record1, knots=False):
scale = 3600/1852 if knots else 1
t0 = tod(record0)
t1 = tod(record1)
return (distance(record0, record1) / math.fabs(t1-t0)) * scale
-
+
diff --git a/bin/system_dump.py b/bin/system_dump.py
index d0a31d2..50352e0 100755
--- a/bin/system_dump.py
+++ b/bin/system_dump.py
@@ -39,7 +39,7 @@ exportables = {
}
def primary_key (table, cursor):
-
+
# https://wiki.postgresql.org/wiki/Retrieve_primary_key_columns
qry = """
SELECT a.attname, format_type(a.atttypid, a.atttypmod) AS data_type
@@ -50,7 +50,7 @@ def primary_key (table, cursor):
WHERE i.indrelid = %s::regclass
AND i.indisprimary;
"""
-
+
cursor.execute(qry, (table,))
return cursor.fetchall()
diff --git a/bin/system_load.py b/bin/system_load.py
index 0c229f8..a9a95f3 100755
--- a/bin/system_load.py
+++ b/bin/system_load.py
@@ -34,7 +34,7 @@ exportables = {
}
def primary_key (table, cursor):
-
+
# https://wiki.postgresql.org/wiki/Retrieve_primary_key_columns
qry = """
SELECT a.attname, format_type(a.atttypid, a.atttypmod) AS data_type
@@ -45,13 +45,13 @@ def primary_key (table, cursor):
WHERE i.indrelid = %s::regclass
AND i.indisprimary;
"""
-
+
cursor.execute(qry, (table,))
return cursor.fetchall()
def import_table(fd, table, columns, cursor):
pk = [ r[0] for r in primary_key(table, cursor) ]
-
+
# Create temporary table to import into
temptable = "import_"+table
print("Creating temporary table", temptable)
@@ -61,29 +61,29 @@ def import_table(fd, table, columns, cursor):
AS SELECT {', '.join(pk + columns)} FROM {table}
WITH NO DATA;
"""
-
+
#print(qry)
cursor.execute(qry)
-
+
# Import into the temp table
print("Import data into temporary table")
cursor.copy_from(fd, temptable)
-
+
# Update the destination table
print("Updating destination table")
setcols = ", ".join([ f"{c} = t.{c}" for c in columns ])
wherecols = " AND ".join([ f"{table}.{c} = t.{c}" for c in pk ])
-
+
qry = f"""
UPDATE {table}
SET {setcols}
FROM {temptable} t
WHERE {wherecols};
"""
-
+
#print(qry)
cursor.execute(qry)
-
+
if __name__ == '__main__':
@@ -111,7 +111,7 @@ if __name__ == '__main__':
print(f"It looks like table {table} may have already been imported. Skipping it.")
except FileNotFoundError:
print(f"File not found. Skipping {path}")
-
+
db.conn.commit()
print("Reading surveys")
@@ -130,7 +130,7 @@ if __name__ == '__main__':
columns = exportables["survey"][table]
path = os.path.join(pathPrefix, "-"+table)
print(" ←← ", path, " →→ ", table, columns)
-
+
try:
with open(path, "rb") as fd:
if columns is not None:
@@ -143,7 +143,7 @@ if __name__ == '__main__':
print(f"It looks like table {table} may have already been imported. Skipping it.")
except FileNotFoundError:
print(f"File not found. Skipping {path}")
-
+
# If we don't commit the data does not actually get copied
db.conn.commit()
diff --git a/lib/www/client/source/src/App.vue b/lib/www/client/source/src/App.vue
index 4e03bcc..61b0427 100644
--- a/lib/www/client/source/src/App.vue
+++ b/lib/www/client/source/src/App.vue
@@ -26,7 +26,7 @@