aboutsummaryrefslogtreecommitdiffstats
path: root/webmap-cgi
diff options
context:
space:
mode:
authorGuilhem Moulin <guilhem@fripost.org>2025-05-26 23:58:03 +0200
committerGuilhem Moulin <guilhem@fripost.org>2025-05-27 00:20:00 +0200
commit0fe67edd568a53abc1d7514b14d1a8a1d2af3e0e (patch)
tree9921601b3c4c1dd96479a262b2fba31ca052ecff /webmap-cgi
parent7b256f36883b664c05a93b1ca9b6e999707dea8e (diff)
CGI: Serve only the feature's properties not the entire feature as GeoJSON.
The Web Application doesn't need the original full/non-simplified geometry. After all, OpenLayers has fetched the tiles already and the (visible part of) the geometry is already cached in the target SRS with sensible simplification factors. So there is really no need to transfer megabytes of high-precision data to the client to highlight the feature. This change means that CGI responses will remain small hence can be buffered. To further speed up things one could store geometry area/perimeter/length as generated columns (along with the list of municipality and county codes) and serve rows directly from PostgreSQL (bypassing PostGIS). Also since buffering is now an option the rows don't need to be served in the same order as the request, so one could batch queries using ANY[] or IN.
Diffstat (limited to 'webmap-cgi')
-rwxr-xr-xwebmap-cgi28
1 files changed, 11 insertions, 17 deletions
diff --git a/webmap-cgi b/webmap-cgi
index c4c4142..b5e5f98 100755
--- a/webmap-cgi
+++ b/webmap-cgi
@@ -23,7 +23,7 @@
import sys
from os import path as os_path
-from json import load as json_load, JSONDecodeError
+from json import load as json_load, dumps as json_dumps, JSONDecodeError
import logging
from typing import Final, Iterator
import atexit
@@ -135,7 +135,9 @@ def get_query_map(layernames : set[str]) -> dict[str,bytes]:
resp = cur.fetchone()
while resp is not None:
c = resp[0]
+ # pylint: disable-next=too-many-boolean-expressions
if (c in ('layer_group', 'layer') or
+ (c == 'ogc_fid' and pkey_col != c) or
(d == 2 and c in ('geom_area', 'geom_perimeter')) or
(d == 1 and c == 'geom_length')):
logging.warning('Duplicate column name "%s"', c)
@@ -143,17 +145,14 @@ def get_query_map(layernames : set[str]) -> dict[str,bytes]:
column_names.append(c)
resp = cur.fetchone()
- # TODO[trixie] id_column => 'ogc_fid' (but don't hardcode either)
query = 'WITH feature AS ('
- query += 'SELECT m.' + common.escape_identifier(pkey_col) + ','
+ query += 'SELECT m.' + common.escape_identifier(pkey_col) + ' AS ogc_fid,'
for column_name in column_names:
query += 'm.' + common.escape_identifier(column_name) + ','
if force2d:
geom_col2d_esc = 'ST_Force2D(m.' + common.escape_identifier(geom_cols[0]) + ')'
- query += geom_col2d_esc + ' AS ' + common.escape_identifier(geom_cols[0]) + ','
else:
geom_col2d_esc = 'm.' + common.escape_identifier(geom_cols[0])
- query += geom_col2d_esc + ','
if d == 2:
query += 'ST_Area(' + geom_col2d_esc +') AS geom_area,'
query += 'ST_Perimeter(' + geom_col2d_esc +') AS geom_perimeter,'
@@ -164,18 +163,13 @@ def get_query_map(layernames : set[str]) -> dict[str,bytes]:
query += common.escape_identifier(layername) + ' m '
query += 'WHERE m.' + common.escape_identifier(pkey_col) + ' = %s'
query += ') '
- # letting PostgreSQL convert to UTF-8 is likely faster than if we were doing
- # it ourselves
- query += 'SELECT convert_to('
- query += 'ST_AsGeoJSON(feature.*,'
- query += 'geom_column=>' + common.escape_literal_str(geom_cols[0]) + ','
- query += 'pretty_bool=>\'f\'),'
- query += '\'UTF8\') AS "GeoJSON" '
- query += 'FROM feature'
+ # TODO[trixie] use json_serialize() from PostgreSQL 17 to avoid serializing on
+ # the Python side. (There is also row_to_json() which might be of interest if
+ # json not jsonb is needed.)
+ query += 'SELECT to_jsonb(feature) FROM feature'
# The query never returns more than one row since we filter on a single FID.
- # Don't try to be clever and batch queries in an IN set or ANY as we
- # want to preserve the order in the response (so the feature(s)
- # exactly under the cursor are returned first).
+    # TODO: batch queries using ANY[] or an IN set (the consumer will then need
+    # to re-order the response)
ret[layername] = query.encode('utf-8')
return ret
@@ -243,7 +237,7 @@ def application(env, start_response) -> Iterator[bytes]:
first = False
else:
yield b','
- yield resp[0]
+ yield json_dumps(resp[0], ensure_ascii=False, separators=(',', ':')).encode('utf-8')
# the query never returns more than one row since we filter on a single FID
if first:
yield b'[]' # no match, empty response