Refactor PDAL pipeline for filter points, --auto-boundary config addition

pull/1355/head
Piero Toffanin 2021-10-13 13:54:16 -04:00
parent 43870b6411
commit d6c40929d4
7 changed files with 103 additions and 98 deletions

View file

@@ -1 +1 @@
-2.6.4
+2.6.5

View file

@@ -7,9 +7,43 @@ from opendm import system
 from pyproj import CRS
 from opendm.location import transformer
 from opendm.utils import double_quote
+from osgeo import ogr
+from opendm.shots import get_origin
+
+def compute_boundary_from_shots(reconstruction_json, buffer=0, reconstruction_offset=(0, 0)):
+    if not os.path.isfile(reconstruction_json):
+        raise IOError(reconstruction_json + " does not exist.")
+
+    with open(reconstruction_json) as f:
+        data = json.load(f)
+    reconstruction = data[0]
+
+    mp = ogr.Geometry(ogr.wkbMultiPoint)
+
+    for shot_image in reconstruction['shots']:
+        shot = reconstruction['shots'][shot_image]
+        if shot['gps_dop'] < 999999:
+            camera = reconstruction['cameras'][shot['camera']]
+
+            p = ogr.Geometry(ogr.wkbPoint)
+            origin = get_origin(shot)
+
+            p.AddPoint_2D(origin[0] + reconstruction_offset[0], origin[1] + reconstruction_offset[1])
+            mp.AddGeometry(p)
+
+    if mp.GetGeometryCount() < 3:
+        return None
+
+    convexhull = mp.ConvexHull()
+    boundary = convexhull.Buffer(buffer)
+
+    return load_boundary(boundary.ExportToJson())
 
 def load_boundary(boundary_json, reproject_to_proj4=None):
-    with fiona.open(io.BytesIO(json.dumps(boundary_json).encode('utf-8')), 'r') as src:
+    if not isinstance(boundary_json, str):
+        boundary_json = json.dumps(boundary_json)
+
+    with fiona.open(io.BytesIO(boundary_json.encode('utf-8')), 'r') as src:
         if len(src) != 1:
             raise IOError("Boundary must have a single polygon (found: %s)" % len(src))
@@ -51,6 +85,9 @@ def boundary_offset(boundary, reconstruction_offset):
     return res
 
 def as_polygon(boundary):
+    if boundary is None:
+        return None
+
     return "POLYGON((" + ", ".join([" ".join(map(str, c)) for c in boundary]) + "))"
 
 def export_to_bounds_files(boundary, proj4, bounds_json_file, bounds_gpkg_file):
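The new auto-boundary helper takes the camera shot origins from OpenSfM's reconstruction.json, builds their convex hull, and buffers it outward. A minimal standalone sketch of that hull-plus-buffer technique, assuming GDAL's Python bindings are available (the coordinates below are made up):

from osgeo import ogr

mp = ogr.Geometry(ogr.wkbMultiPoint)
for x, y in [(0, 0), (10, 0), (10, 10), (0, 10), (5, 5)]:  # hypothetical shot origins
    p = ogr.Geometry(ogr.wkbPoint)
    p.AddPoint_2D(x, y)
    mp.AddGeometry(p)

hull = mp.ConvexHull()       # tightest polygon enclosing the points
boundary = hull.Buffer(2.0)  # expand outward; ODM passes ~20x the average GSD
print(boundary.ExportToJson())  # GeoJSON polygon, consumable by load_boundary()

The buffered polygon flows back through load_boundary(), and as_polygon() later renders it as WKT for PDAL's crop filter.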

View file

@@ -308,11 +308,19 @@ def config(argv=None, parser=None):
             metavar='<json>',
             action=StoreValue,
             type=path_or_json_string,
-            help='GeoJSON polygon defining the boundary of the reconstruction. '
+            help='GeoJSON polygon limiting the area of the reconstruction. '
                 'Can be specified either as path to a GeoJSON file or as a '
                 'JSON string representing the contents of a '
                 'GeoJSON file. Default: %(default)s')
 
+    parser.add_argument('--auto-boundary',
+            action=StoreTrue,
+            nargs=0,
+            default=False,
+            help='Automatically set a boundary using camera shot locations to limit the area of the reconstruction. '
+                 'This can help remove far away background artifacts (sky, background landscapes, etc.). See also --boundary. '
+                 'Default: %(default)s')
+
    parser.add_argument('--pc-quality',
            metavar='<string>',
            action=StoreValue,
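The new flag takes no arguments (nargs=0 with ODM's custom StoreTrue action). A hypothetical invocation, assuming a dataset named mydataset under /datasets:

python3 run.py --project-path /datasets mydataset --auto-boundary

When both --auto-boundary and an explicit --boundary are given, the explicit boundary wins; see the warning added in odm_filterpoints below.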

View file

@@ -155,7 +155,7 @@ def run_pipeline(json, verbose=False):
     cmd = [
         'pdal',
         'pipeline',
-        '-i %s' % jsonfile
+        '-i %s' % double_quote(jsonfile)
     ]
 
     if verbose or sys.platform == 'win32':
         system.run(' '.join(cmd))
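Quoting the temporary pipeline path matters because the joined command goes through a shell, and Windows temp paths in particular often contain spaces. A minimal sketch, assuming double_quote simply wraps its argument in double quotes (hypothetical re-implementation, not ODM's exact code):

def double_quote(s):
    return '"%s"' % s  # opendm.utils.double_quote serves this role

jsonfile = r"C:\Users\My User\AppData\Local\Temp\pipeline.json"  # space in path
print('-i %s' % jsonfile)                # unquoted: the shell splits at "My User"
print('-i %s' % double_quote(jsonfile))  # quoted: parsed as a single argument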

View file

@@ -1,4 +1,4 @@
-import os, sys, shutil, tempfile, json, math
+import os, sys, shutil, tempfile, math, json
 from opendm import system
 from opendm import log
 from opendm import context
@@ -8,6 +8,7 @@ from opendm import io
 from opendm.concurrency import parallel_map
 from opendm.utils import double_quote
 from opendm.boundary import as_polygon
+from opendm.dem.pdal import run_pipeline
 
 def ply_info(input_ply):
     if not os.path.exists(input_ply):
@@ -139,37 +140,40 @@ def filter(input_point_cloud, output_point_cloud, standard_deviation=2.5, meank=
             shutil.rmtree(partsdir)
     else:
         # Process point cloud (or a point cloud submodel) in a single step
-        filterArgs = {
-            'inputFile': input_point_cloud,
-            'outputFile': output_point_cloud,
-            'stages': " ".join(filters),
-            'dims': dims
-        }
-
-        cmd = ("pdal translate -i \"{inputFile}\" "
-            "-o \"{outputFile}\" "
-            "{stages} "
-            "--writers.ply.sized_types=false "
-            "--writers.ply.storage_mode=\"little endian\" "
-            "--writers.ply.dims=\"{dims}\" "
-            "").format(**filterArgs)
-
-        if 'sample' in filters:
-            cmd += "--filters.sample.radius={} ".format(sample_radius)
-        if 'outlier' in filters:
-            cmd += ("--filters.outlier.method=\"statistical\" "
-                "--filters.outlier.mean_k={} "
-                "--filters.outlier.multiplier={} ").format(meank, standard_deviation)
-        if 'range' in filters:
-            # Remove outliers
-            cmd += "--filters.range.limits=\"Classification![7:7]\" "
-        if 'crop' in filters:
-            cmd += "--filters.crop.polygon=\"%s\"" % as_polygon(boundary)
-
-        system.run(cmd)
+        pipeline = []
+
+        # Input
+        pipeline.append(input_point_cloud)
+
+        # Filters
+        for f in filters:
+            params = {}
+            if f == 'sample':
+                params = {'radius': sample_radius}
+            elif f == 'outlier':
+                params = {'method': 'statistical', 'mean_k': meank, 'multiplier': standard_deviation}
+            elif f == 'range':
+                params = {'limits': 'Classification![7:7]'}
+            elif f == 'crop':
+                params = {'polygon': as_polygon(boundary)}
+            else:
+                raise RuntimeError("Invalid filter in PDAL pipeline (this should not have happened, please report it: https://github.com/OpenDroneMap/ODM/issues)")
+
+            pipeline.append(dict({
+                'type': "filters.%s" % f,
+            }, **params))
+
+        # Output
+        pipeline.append({
+            'type': 'writers.ply',
+            'sized_types': False,
+            'storage_mode': 'little endian',
+            'dims': dims,
+            'filename': output_point_cloud
+        })
+
+        run_pipeline(pipeline, verbose=verbose)
 
     if not os.path.exists(output_point_cloud):
         log.ODM_WARNING("{} not found, filtering has failed.".format(output_point_cloud))

View file

@@ -6,7 +6,8 @@ from opendm import system
 from opendm import context
 from opendm import point_cloud
 from opendm import types
-from opendm.boundary import boundary_offset
+from opendm import gsd
+from opendm.boundary import boundary_offset, compute_boundary_from_shots
 
 class ODMFilterPoints(types.ODM_Stage):
     def process(self, args, outputs):
def process(self, args, outputs):
@@ -22,6 +23,19 @@ class ODMFilterPoints(types.ODM_Stage):
         else:
             inputPointCloud = tree.openmvs_model
 
+        # Check if we need to compute boundary
+        if args.auto_boundary:
+            if reconstruction.is_georeferenced():
+                if not 'boundary' in outputs:
+                    avg_gsd = gsd.opensfm_reconstruction_average_gsd(tree.opensfm_reconstruction)
+                    outputs['boundary'] = compute_boundary_from_shots(tree.opensfm_reconstruction, avg_gsd * 20, reconstruction.get_proj_offset())  # 20 is arbitrary
+                    if outputs['boundary'] is None:
+                        log.ODM_WARNING("Cannot compute boundary from camera shots")
+                else:
+                    log.ODM_WARNING("--auto-boundary set but so is --boundary, will use --boundary")
+            else:
+                log.ODM_WARNING("Not a georeferenced reconstruction, will ignore --auto-boundary")
+
         point_cloud.filter(inputPointCloud, tree.filtered_point_cloud,
             standard_deviation=args.pc_filter,
             sample_radius=args.pc_sample,
@@ -29,6 +43,13 @@ class ODMFilterPoints(types.ODM_Stage):
                 verbose=args.verbose,
                 max_concurrency=args.max_concurrency)
 
+            # Quick check
+            info = point_cloud.ply_info(tree.filtered_point_cloud)
+            if info["vertex_count"] == 0:
+                extra_msg = ''
+                if 'boundary' in outputs:
+                    extra_msg = '. Also, since you used a boundary setting, make sure that the boundary polygon you specified covers the reconstruction area correctly.'
+                raise system.ExitException("Uh oh! We ended up with an empty point cloud. This means that the reconstruction did not succeed. Have you followed best practices for data acquisition? See https://docs.opendronemap.org/flying/%s" % extra_msg)
         else:
             log.ODM_WARNING('Found a valid point cloud file in: %s' %
                             tree.filtered_point_cloud)
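The new guard relies on ply_info() reporting a vertex count of zero. A minimal sketch of that kind of header check, assuming a PLY file whose header contains an "element vertex <count>" line (hypothetical helper, not ODM's implementation):

def vertex_count(ply_path):
    # The PLY header is ASCII even when the body is binary
    with open(ply_path, "rb") as f:
        for raw in f:
            line = raw.decode("ascii", errors="ignore").strip()
            if line.startswith("element vertex"):
                return int(line.split()[-1])
            if line == "end_header":
                break
    return 0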

View file

@@ -105,71 +105,6 @@ class ODMSplitStage(types.ODM_Stage):
 
             self.update_progress(50)
 
-            # TODO: this is currently not working and needs a champion to fix it
-            # https://community.opendronemap.org/t/filenotfound-error-cameras-json/6047/2
-            # resplit_done_file = octx.path('resplit_done.txt')
-            # if not io.file_exists(resplit_done_file) and bool(args.split_multitracks):
-            #     submodels = mds.get_submodel_paths()
-            #     i = 0
-            #     for s in submodels:
-            #         template = octx.path("../aligned_submodels/submodel_%04d")
-            #         with open(s+"/reconstruction.json", "r") as f:
-            #             j = json.load(f)
-            #         for k in range(0, len(j)):
-            #             v = j[k]
-            #             path = template % i
-            #             # Create the submodel path up to opensfm
-            #             os.makedirs(path+"/opensfm")
-            #             os.makedirs(path+"/images")
-            #             # symlinks for common data
-            #             images = os.listdir(octx.path("../images"))
-            #             for image in images:
-            #                 os.symlink("../../../images/"+image, path+"/images/"+image)
-            #             os.symlink("../../../opensfm/exif", path+"/opensfm/exif")
-            #             os.symlink("../../../opensfm/features", path+"/opensfm/features")
-            #             os.symlink("../../../opensfm/matches", path+"/opensfm/matches")
-            #             os.symlink("../../../opensfm/reference_lla.json", path+"/opensfm/reference_lla.json")
-            #             os.symlink("../../../opensfm/camera_models.json", path+"/opensfm/camera_models.json")
-            #             shutil.copy(s+"/../cameras.json", path+"/cameras.json")
-            #             shutil.copy(s+"/../images.json", path+"/images.json")
-            #             with open(octx.path("config.yaml")) as f:
-            #                 doc = yaml.safe_load(f)
-            #             dmcv = "depthmap_min_consistent_views"
-            #             if dmcv in doc:
-            #                 if len(v["shots"]) < doc[dmcv]:
-            #                     doc[dmcv] = len(v["shots"])
-            #                     print("WARNING: Reduced "+dmcv+" to accommodate short track")
-            #             with open(path+"/opensfm/config.yaml", "w") as f:
-            #                 yaml.dump(doc, f)
-            #             # We need the original tracks file for the visualsfm export, since
-            #             # there may still be point matches between the tracks
-            #             shutil.copy(s+"/tracks.csv", path+"/opensfm/tracks.csv")
-            #             # Create our new reconstruction file with only the relevant track
-            #             with open(path+"/opensfm/reconstruction.json", "w") as o:
-            #                 json.dump([v], o)
-            #             # Create image lists
-            #             with open(path+"/opensfm/image_list.txt", "w") as o:
-            #                 o.writelines(list(map(lambda x: "../images/"+x+'\n', v["shots"].keys())))
-            #             with open(path+"/img_list.txt", "w") as o:
-            #                 o.writelines(list(map(lambda x: x+'\n', v["shots"].keys())))
-            #             i += 1
-            #     os.rename(octx.path("../submodels"), octx.path("../unaligned_submodels"))
-            #     os.rename(octx.path("../aligned_submodels"), octx.path("../submodels"))
-            #     octx.touch(resplit_done_file)
-
             mds = metadataset.MetaDataSet(tree.opensfm)
             submodel_paths = [os.path.abspath(p) for p in mds.get_submodel_paths()]