Merge pull request #1156 from pierotofy/python3

ODM 2.0 - Migration to 18.04, Python 3.6, new features, bug fixes

Former-commit-id: ef174de8af8dbb87379fa882f20ede34c751aef1
pull/1161/head v2.0.0
Piero Toffanin 2020-09-21 11:03:09 -04:00 zatwierdzone przez GitHub
commit a9c5333a53
58 zmienionych plików z 3757 dodań i 582 usunięć

3
.gitignore vendored
Wyświetl plik

@ -24,4 +24,5 @@ ceres-solver.tar.gz
*.pyc
opencv.zip
settings.yaml
.setupdevenv
.setupdevenv
__pycache__

Wyświetl plik

@ -1,61 +1,10 @@
FROM phusion/baseimage:0.10.2 as base
FROM ubuntu:18.04
# Env variables
ENV DEBIAN_FRONTEND noninteractive
#Install dependencies and required requisites
RUN add-apt-repository -y ppa:ubuntugis/ubuntugis-unstable \
&& add-apt-repository -y ppa:george-edison55/cmake-3.x \
&& apt-get update -y \
&& apt-get install --no-install-recommends -y \
build-essential \
cmake \
gdal-bin \
git \
libatlas-base-dev \
libavcodec-dev \
libavformat-dev \
libboost-date-time-dev \
libboost-filesystem-dev \
libboost-iostreams-dev \
libboost-log-dev \
libboost-python-dev \
libboost-regex-dev \
libboost-thread-dev \
libeigen3-dev \
libflann-dev \
libgdal-dev \
libgeotiff-dev \
libgoogle-glog-dev \
libgtk2.0-dev \
libjasper-dev \
libjpeg-dev \
libjsoncpp-dev \
liblapack-dev \
liblas-bin \
libpng-dev \
libproj-dev \
libsuitesparse-dev \
libswscale-dev \
libtbb2 \
libtbb-dev \
libtiff-dev \
libvtk6-dev \
libxext-dev \
python-dev \
python-gdal \
python-matplotlib \
python-pip \
python-software-properties \
python-wheel \
software-properties-common \
swig2.0 \
grass-core \
libssl-dev \
&& apt-get remove libdc1394-22-dev \
&& pip install --upgrade pip \
&& pip install setuptools
ENV PYTHONPATH "$PYTHONPATH:/code/SuperBuild/install/lib/python3.6/dist-packages"
ENV PYTHONPATH "$PYTHONPATH:/code/SuperBuild/src/opensfm"
ENV LD_LIBRARY_PATH "$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"
# Prepare directories
WORKDIR /code
@ -63,24 +12,7 @@ WORKDIR /code
# Copy everything
COPY . ./
RUN pip install -r requirements.txt
ENV PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python2.7/dist-packages"
ENV PYTHONPATH="$PYTHONPATH:/code/SuperBuild/src/opensfm"
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"
# Compile code in SuperBuild and root directories
RUN rm -fr docker \
&& cd SuperBuild \
&& mkdir build \
&& cd build \
&& cmake .. \
&& make -j$(nproc) \
&& cd ../.. \
&& mkdir build \
&& cd build \
&& cmake .. \
&& make -j$(nproc)
RUN bash configure.sh install
# Cleanup APT
RUN apt-get clean \
@ -98,4 +30,4 @@ RUN rm -rf \
/code/SuperBuild/src/pdal
# Entry point
ENTRYPOINT ["python", "/code/run.py"]
ENTRYPOINT ["python3", "/code/run.py"]

Wyświetl plik

@ -75,9 +75,9 @@ See http://docs.opendronemap.org for tutorials and more guides.
We have a vibrant [community forum](https://community.opendronemap.org/). You can [search it](https://community.opendronemap.org/search?expanded=true) for issues you might be having with ODM and you can post questions there. We encourage users of ODM to participate in the forum and to engage with fellow drone mapping users.
## Native Install (Ubuntu 16.04)
## Native Install (Ubuntu 18.04)
You can run ODM natively on Ubuntu 16.04 LTS (although we don't recommend it):
You can run ODM natively on Ubuntu 18.04 LTS (although we don't recommend it):
1. Download the source from [here](https://github.com/OpenDroneMap/ODM/archive/master.zip)
2. Run `bash configure.sh install`

Wyświetl plik

@ -129,7 +129,7 @@ endforeach()
externalproject_add(mve
GIT_REPOSITORY https://github.com/OpenDroneMap/mve.git
GIT_TAG 099
GIT_TAG 200
UPDATE_COMMAND ""
SOURCE_DIR ${SB_SOURCE_DIR}/elibs/mve
CONFIGURE_COMMAND ""

Wyświetl plik

@ -9,7 +9,7 @@ ExternalProject_Add(${_proj_name}
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}/${_proj_name}
GIT_REPOSITORY https://github.com/OpenDroneMap/mvs-texturing
GIT_TAG 101
GIT_TAG 200
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------

Wyświetl plik

@ -16,7 +16,7 @@ ExternalProject_Add(${_proj_name}
CMAKE_ARGS
-DBUILD_TESTS=OFF
-DBUILD_PYTHON=ON
-DPYBIND11_PYTHON_VERSION=2.7
-DPYBIND11_PYTHON_VERSION=3.6
-DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}

Wyświetl plik

@ -9,7 +9,7 @@ ExternalProject_Add(${_proj_name}
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
GIT_REPOSITORY https://github.com/OpenDroneMap/OpenSfM/
GIT_TAG 100
GIT_TAG 200
#--Update/Patch step----------
UPDATE_COMMAND git submodule update --init --recursive
#--Configure step-------------
@ -18,7 +18,7 @@ ExternalProject_Add(${_proj_name}
-DCERES_ROOT_DIR=${SB_INSTALL_DIR}
-DOpenCV_DIR=${SB_INSTALL_DIR}/share/OpenCV
-DOPENSFM_BUILD_TESTS=off
-DPYTHON_EXECUTABLE=/usr/bin/python
-DPYTHON_EXECUTABLE=/usr/bin/python3
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}
#--Install step---------------

Wyświetl plik

@ -8,7 +8,7 @@ ExternalProject_Add(${_proj_name}
STAMP_DIR ${_SB_BINARY_DIR}/stamp
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
URL https://github.com/PDAL/PDAL/archive/2.1.0.zip
URL https://github.com/PDAL/PDAL/archive/2.2.0.zip
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------

Wyświetl plik

@ -1 +1 @@
1.0.2
2.0.0

100
configure.sh 100755 → 100644
Wyświetl plik

@ -1,5 +1,20 @@
#!/bin/bash
check_version(){
UBUNTU_VERSION=$(lsb_release -r)
if [[ $UBUNTU_VERSION = *"18.04"* ]]; then
echo "Ubuntu: $UBUNTU_VERSION, good!"
elif [[ $UBUNTU_VERSION = *"16.04" ]]; then
echo "ODM 2.0 has upgraded to Ubuntu 18.04, but you're on 16.04"
echo "The last version of ODM that supports Ubuntu 16.04 is v1.0.2. We recommend you upgrade to Ubuntu 18.04, or better yet, use docker."
exit 1
else
echo "You are not on Ubuntu 18.04 (detected: $UBUNTU_VERSION)"
echo "It might be possible to run ODM on a newer version of Ubuntu, however, you cannot rely on this script."
exit 1
fi
}
if [[ $2 =~ ^[0-9]+$ ]] ; then
processes=$2
else
@ -7,63 +22,73 @@ else
fi
install() {
cd /code
## Set up library paths
export PYTHONPATH=$RUNPATH/SuperBuild/install/lib/python2.7/dist-packages:$RUNPATH/SuperBuild/src/opensfm:$PYTHONPATH
export DEBIAN_FRONTEND=noninteractive
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$RUNPATH/SuperBuild/install/lib
## Before installing
## Before installing
echo "Updating the system"
add-apt-repository -y ppa:ubuntugis/ubuntugis-unstable
apt-get update
if ! command -v sudo &> /dev/null
then
echo "Installing sudo"
apt-get update && apt-get install -y sudo
fi
sudo apt-get update && sudo apt-get install software-properties-common lsb-release tzdata -y --no-install-recommends
# Check version
check_version
sudo add-apt-repository -y ppa:ubuntugis/ubuntugis-unstable
sudo apt-get update
echo "Installing Required Requisites"
apt-get install -y -qq build-essential \
sudo apt-get install -y -qq --no-install-recommends \
build-essential \
git \
cmake \
python-pip \
python3-pip \
libgdal-dev \
gdal-bin \
libgeotiff-dev \
pkg-config \
libjsoncpp-dev \
python-gdal \
python3-gdal \
python3-setuptools \
grass-core \
libssl-dev \
liblas-bin \
swig2.0 \
python-wheel \
swig3.0 \
python3-wheel \
libboost-log-dev
sudo pip3 install -U pip
echo "Getting CMake 3.1 for MVS-Texturing"
apt-get install -y software-properties-common python-software-properties
add-apt-repository -y ppa:george-edison55/cmake-3.x
apt-get update -y
apt-get install -y --only-upgrade cmake
echo "Installing OpenCV Dependencies"
apt-get install -y -qq libgtk2.0-dev \
sudo apt-get install -y -qq --no-install-recommends libgtk2.0-dev \
libavcodec-dev \
libavformat-dev \
libswscale-dev \
python-dev \
python3-dev \
libtbb2 \
libtbb-dev \
libjpeg-dev \
libpng-dev \
libtiff-dev \
libjasper-dev \
libflann-dev \
libproj-dev \
libxext-dev \
liblapack-dev \
libeigen3-dev \
libvtk6-dev
echo "Removing libdc1394-22-dev due to python opencv issue"
apt-get remove libdc1394-22-dev
sudo add-apt-repository "deb http://security.ubuntu.com/ubuntu xenial-security main"
sudo apt-get update
sudo apt-get install -y -qq --no-install-recommends libjasper1 \
libjasper-dev
echo "Installing OpenSfM Dependencies"
apt-get install -y -qq libgoogle-glog-dev \
sudo apt-get install -y -qq --no-install-recommends libgoogle-glog-dev \
libsuitesparse-dev \
libboost-filesystem-dev \
libboost-iostreams-dev \
@ -72,12 +97,22 @@ install() {
libboost-date-time-dev \
libboost-thread-dev
pip install -r "${RUNPATH}/requirements.txt"
pip install -r requirements.txt
# Fix: /usr/local/lib/python2.7/dist-packages/requests/__init__.py:83: RequestsDependencyWarning: Old version of cryptography ([1, 2, 3]) may cause slowdown.
pip install --upgrade cryptography
python -m easy_install --upgrade pyOpenSSL
if [ ! -z "$PORTABLE_INSTALL" ]; then
echo "Replacing g++ and gcc with our scripts for portability..."
if [ ! -e /usr/bin/gcc_real ]; then
sudo mv -v /usr/bin/gcc /usr/bin/gcc_real
sudo cp -v ./docker/gcc /usr/bin/gcc
fi
if [ ! -e /usr/bin/g++_real ]; then
sudo mv -v /usr/bin/g++ /usr/bin/g++_real
sudo cp -v ./docker/g++ /usr/bin/g++
fi
fi
set -eo pipefail
echo "Compiling SuperBuild"
cd ${RUNPATH}/SuperBuild
mkdir -p build && cd build
@ -87,11 +122,13 @@ install() {
cd ${RUNPATH}
mkdir -p build && cd build
cmake .. && make -j$processes
echo "Configuration Finished"
}
uninstall() {
check_version
echo "Removing SuperBuild and build directories"
cd ${RUNPATH}/SuperBuild
rm -rfv build src download install
@ -100,11 +137,12 @@ uninstall() {
}
reinstall() {
check_version
echo "Reinstalling ODM modules"
uninstall
install
}
usage() {
echo "Usage:"
echo "bash configure.sh <install|update|uninstall|help> [nproc]"
@ -120,7 +158,7 @@ usage() {
echo "[nproc] is an optional argument that can set the number of processes for the make -j tag. By default it uses $(nproc)"
}
if [[ $1 =~ ^(install|reinstall|uninstall|usage)$ ]]; then
if [[ $1 =~ ^(install|reinstall|uninstall)$ ]]; then
RUNPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
"$1"
else

Wyświetl plik

@ -1,128 +0,0 @@
#!/bin/bash
if [[ $2 =~ ^[0-9]+$ ]] ; then
processes=$2
else
processes=$(nproc)
fi
install() {
cd /code
## Set up library paths
export PYTHONPATH=$RUNPATH/SuperBuild/install/lib/python2.7/dist-packages:$RUNPATH/SuperBuild/src/opensfm:$PYTHONPATH
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$RUNPATH/SuperBuild/install/lib
## Before installing
echo "Updating the system"
sudo add-apt-repository -y ppa:ubuntugis/ubuntugis-unstable
echo "Installing Required Requisites"
sudo apt-get install -y -qq build-essential \
git \
cmake \
python-pip \
libgdal-dev \
gdal-bin \
libgeotiff-dev \
pkg-config \
libjsoncpp-dev \
python-gdal \
grass-core \
libssl-dev \
liblas-bin \
swig3.0 \
python-wheel \
libboost-log-dev
echo "Installing OpenCV Dependencies"
sudo apt-get install -y -qq libgtk2.0-dev \
libavcodec-dev \
libavformat-dev \
libswscale-dev \
python-dev \
libtbb2 \
libtbb-dev \
libjpeg-dev \
libpng-dev \
libtiff-dev \
libflann-dev \
libproj-dev \
libxext-dev \
liblapack-dev \
libeigen3-dev \
libvtk6-dev
sudo add-apt-repository "deb http://security.ubuntu.com/ubuntu xenial-security main"
sudo apt-get update
sudo apt-get install -y -qq libjasper1 \
libjasper-dev
echo "Installing OpenSfM Dependencies"
sudo apt-get install -y -qq libgoogle-glog-dev \
libsuitesparse-dev \
libboost-filesystem-dev \
libboost-iostreams-dev \
libboost-regex-dev \
libboost-python-dev \
libboost-date-time-dev \
libboost-thread-dev
pip install -r "/code/requirements.txt"
# Fix: /usr/local/lib/python2.7/dist-packages/requests/__init__.py:83: RequestsDependencyWarning: Old version of cryptography ([1, 2, 3]) may cause slowdown.
pip install --upgrade cryptography
python -m easy_install --upgrade pyOpenSSL
echo "Compiling SuperBuild"
cd ${RUNPATH}/SuperBuild
mkdir -p build && cd build
cmake .. && make -j$processes
echo "Compiling build"
cd ${RUNPATH}
mkdir -p build && cd build
cmake .. && make -j$processes
echo "Configuration Finished"
}
uninstall() {
echo "Removing SuperBuild and build directories"
cd ${RUNPATH}/SuperBuild
rm -rfv build src download install
cd ../
rm -rfv build
}
reinstall() {
echo "Reinstalling ODM modules"
uninstall
install
}
usage() {
echo "Usage:"
echo "bash configure.sh <install|update|uninstall|help> [nproc]"
echo "Subcommands:"
echo " install"
echo " Installs all dependencies and modules for running OpenDroneMap"
echo " reinstall"
echo " Removes SuperBuild and build modules, then re-installs them. Note this does not update OpenDroneMap to the latest version. "
echo " uninstall"
echo " Removes SuperBuild and build modules. Does not uninstall dependencies"
echo " help"
echo " Displays this message"
echo "[nproc] is an optional argument that can set the number of processes for the make -j tag. By default it uses $(nproc)"
}
if [[ $1 =~ ^(install|reinstall|uninstall)$ ]]; then
RUNPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
"$1"
else
echo "Invalid instructions." >&2
usage
exit 1
fi

Wyświetl plik

@ -1,23 +0,0 @@
#!/bin/sh
#
# An example hook script to verify what is about to be committed.
# Called by "git commit" with no arguments. The hook should
# exit with non-zero status after issuing an appropriate message if
# it wants to stop the commit.
#
# To enable this hook, rename this file to "pre-commit".
exec 1>&2
echo "RUNNING PRE-COMMIT"
EXIT_CODE=0
# Get list of files about to be committed
if git diff --cached --name-only --diff-filter=ACM | grep 'ccd_defs.json'; then
echo "We changed ccd_defs.json"
GIT_ROOT=$(git rev-parse --show-toplevel)
python $GIT_ROOT/ccd_defs_check.py
EXIT_CODE=$(echo $?)
fi
# non-zero exit fails the commit
exit $EXIT_CODE

Wyświetl plik

@ -1 +0,0 @@
305acb70d8d2c350a8374fbb5028d914facf3fa4

Plik binarny nie jest wyświetlany.

Przed

Szerokość:  |  Wysokość:  |  Rozmiar: 27 KiB

Wyświetl plik

@ -1 +0,0 @@
de3d398070f83430c950ae84845fd85b504b5452

Wyświetl plik

@ -63,8 +63,10 @@ def parallel_map(func, items, max_workers=1):
t.start()
threads.append(t)
i = 1
for t in items:
pq.put((i, t.copy()))
i += 1
def stop_workers():
for i in range(len(threads)):

Wyświetl plik

@ -60,16 +60,18 @@ class StoreValue(argparse.Action):
setattr(namespace, self.dest, values)
setattr(namespace, self.dest + '_is_set', True)
parser = SettingsParser(description='OpenDroneMap',
usage='%(prog)s [options] <project name>',
yaml_file=open(context.settings_path))
args = None
def config(argv=None):
def config(argv=None, parser=None):
global args
if args is not None and argv is None:
return args
if parser is None:
parser = SettingsParser(description='ODM',
usage='%(prog)s [options] <project name>',
yaml_file=open(context.settings_path))
parser.add_argument('--project-path',
metavar='<path>',
@ -89,9 +91,9 @@ def config(argv=None):
action=StoreValue,
default=2048,
type=int,
help='Resizes images by the largest side for feature extraction purposes only. '
help='Legacy option (use --feature-quality instead). Resizes images by the largest side for feature extraction purposes only. '
'Set to -1 to disable. This does not affect the final orthophoto '
' resolution quality and will not resize the original images. Default: %(default)s')
'resolution quality and will not resize the original images. Default: %(default)s')
parser.add_argument('--end-with', '-e',
metavar='<string>',
@ -143,7 +145,16 @@ def config(argv=None):
default='sift',
choices=['sift', 'hahog'],
help=('Choose the algorithm for extracting keypoints and computing descriptors. '
'Can be one of: [sift, hahog]. Default: '
'Can be one of: %(choices)s. Default: '
'%(default)s'))
parser.add_argument('--feature-quality',
metavar='<string>',
action=StoreValue,
default='high',
choices=['ultra', 'high', 'medium', 'low', 'lowest'],
help=('Set feature extraction quality. Higher quality generates better features, but requires more memory and takes longer. '
'Can be one of: %(choices)s. Default: '
'%(default)s'))
parser.add_argument('--matcher-neighbors',
@ -194,7 +205,7 @@ def config(argv=None):
help=('Set a camera projection type. Manually setting a value '
'can help improve geometric undistortion. By default the application '
'tries to determine a lens type from the images metadata. Can be '
'set to one of: [auto, perspective, brown, fisheye, spherical]. Default: '
'set to one of: %(choices)s. Default: '
'%(default)s'))
parser.add_argument('--radiometric-calibration',
@ -207,7 +218,7 @@ def config(argv=None):
'to obtain reflectance values (otherwise you will get digital number values). '
'[camera] applies black level, vignetting, row gradient gain/exposure compensation (if appropriate EXIF tags are found). '
'[camera+sun] is experimental, applies all the corrections of [camera], plus compensates for spectral radiance registered via a downwelling light sensor (DLS) taking in consideration the angle of the sun. '
'Can be set to one of: [none, camera, camera+sun]. Default: '
'Can be set to one of: %(choices)s. Default: '
'%(default)s'))
parser.add_argument('--max-concurrency',
@ -512,11 +523,25 @@ def config(argv=None):
metavar='<path string>',
action=StoreValue,
default=None,
help=('path to the file containing the ground control '
help=('Path to the file containing the ground control '
'points used for georeferencing. Default: '
'%(default)s. The file needs to '
'be on the following line format: \neasting '
'northing height pixelrow pixelcol imagename'))
'use the following format: \n'
'EPSG:<code> or <+proj definition>\n'
'geo_x geo_y geo_z im_x im_y image_name [gcp_name] [extra1] [extra2]'))
parser.add_argument('--geo',
metavar='<path string>',
action=StoreValue,
default=None,
help=('Path to the image geolocation file containing the camera center coordinates used for georeferencing. '
'Note that omega/phi/kappa are currently not supported (you can set them to 0). '
'Default: '
'%(default)s. The file needs to '
'use the following format: \n'
'EPSG:<code> or <+proj definition>\n'
'image_name geo_x geo_y geo_z [omega (degrees)] [phi (degrees)] [kappa (degrees)] [horz accuracy (meters)] [vert accuracy (meters)]'
''))
parser.add_argument('--use-exif',
action=StoreTrue,
@ -606,9 +631,7 @@ def config(argv=None):
type=str,
choices=['JPEG', 'LZW', 'PACKBITS', 'DEFLATE', 'LZMA', 'NONE'],
default='DEFLATE',
help='Set the compression to use. Note that this could '
'break gdal_translate if you don\'t know what you '
'are doing. Options: %(choices)s.\nDefault: %(default)s')
help='Set the compression to use for orthophotos. Options: %(choices)s.\nDefault: %(default)s')
parser.add_argument('--orthophoto-cutline',
action=StoreTrue,
@ -620,6 +643,14 @@ def config(argv=None):
'Default: '
'%(default)s')
parser.add_argument('--tiles',
action=StoreTrue,
nargs=0,
default=False,
help='Generate static tiles for orthophotos and DEMs that are '
'suitable for viewers like Leaflet or OpenLayers. '
'Default: %(default)s')
parser.add_argument('--build-overviews',
action=StoreTrue,
nargs=0,
@ -649,7 +680,7 @@ def config(argv=None):
parser.add_argument('--version',
action='version',
version='OpenDroneMap {0}'.format(__version__),
version='ODM {0}'.format(__version__),
help='Displays version number and exits. ')
parser.add_argument('--split',
@ -742,7 +773,7 @@ def config(argv=None):
if not args.project_path:
log.ODM_ERROR('You need to set the project path in the '
'settings.yaml file before you can run ODM, '
'or use `--project-path <path>`. Run `python '
'or use `--project-path <path>`. Run `python3 '
'run.py --help` for more information. ')
sys.exit(1)

Wyświetl plik

@ -1,6 +1,5 @@
import os
import sys
from opendm import io
import multiprocessing
# Define some needed locations
@ -12,9 +11,15 @@ superbuild_bin_path = os.path.join(superbuild_path, 'install', 'bin')
tests_path = os.path.join(root_path, 'tests')
tests_data_path = os.path.join(root_path, 'tests/test_data')
# add opencv to python path
pyopencv_path = os.path.join(superbuild_path, 'install/lib/python2.7/dist-packages')
sys.path.append(pyopencv_path)
# add opencv,opensfm to python path
python_packages_paths = [os.path.join(superbuild_path, p) for p in [
'install/lib/python3.6/dist-packages',
'install/lib/python3/dist-packages',
'src/opensfm'
]]
for p in python_packages_paths:
sys.path.append(p)
# define opensfm path
opensfm_path = os.path.join(superbuild_path, "src/opensfm")
@ -46,7 +51,12 @@ odm_modules_src_path = os.path.join(root_path, "modules")
settings_path = os.path.join(root_path, 'settings.yaml')
# Define supported image extensions
supported_extensions = {'.jpg','.jpeg','.png', '.tif', '.tiff'}
supported_extensions = {'.jpg','.jpeg','.png', '.tif', '.tiff', '.bmp'}
# Define the number of cores
num_cores = multiprocessing.cpu_count()
# Print python paths if invoked as a script
if __name__ == "__main__":
print("export PYTHONPATH=" + ":".join(python_packages_paths))

Wyświetl plik

@ -60,7 +60,7 @@ class Cropper:
os.remove(original_geotiff)
except Exception as e:
log.ODM_WARNING('Something went wrong while cropping: {}'.format(e.message))
log.ODM_WARNING('Something went wrong while cropping: {}'.format(e))
# Revert rename
os.rename(original_geotiff, geotiff_path)
@ -189,8 +189,14 @@ class Cropper:
BUFFER_SMOOTH_DISTANCE = 3
if buffer_distance > 0:
convexhull = convexhull.Buffer(-(buffer_distance + BUFFER_SMOOTH_DISTANCE))
convexhull = convexhull.Buffer(BUFFER_SMOOTH_DISTANCE)
# For small areas, check that buffering doesn't obliterate
# our hull
tmp = convexhull.Buffer(-(buffer_distance + BUFFER_SMOOTH_DISTANCE))
tmp = tmp.Buffer(BUFFER_SMOOTH_DISTANCE)
if tmp.Area() > 0:
convexhull = tmp
else:
log.ODM_WARNING("Very small crop area detected, we will not smooth it.")
# Save to a new file
bounds_geojson_path = self.path('bounds.geojson')

Wyświetl plik

@ -9,7 +9,7 @@ from opendm.system import run
from opendm import point_cloud
from opendm import io
from opendm import system
from opendm.concurrency import get_max_memory
from opendm.concurrency import get_max_memory, parallel_map
from scipy import ndimage
from datetime import datetime
from opendm import log
@ -81,8 +81,6 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
apply_smoothing=True):
""" Create DEM from multiple radii, and optionally gapfill """
# TODO: refactor to use concurrency.parallel_map
global error
error = None
@ -164,7 +162,7 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
# Sort tiles by increasing radius
tiles.sort(key=lambda t: float(t['radius']), reverse=True)
def process_one(q):
def process_tile(q):
log.ODM_INFO("Generating %s (%s, radius: %s, resolution: %s)" % (q['filename'], output_type, q['radius'], resolution))
d = pdal.json_gdal_base(q['filename'], output_type, q['radius'], resolution, q['bounds'])
@ -178,63 +176,7 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
pdal.json_add_readers(d, [input_point_cloud])
pdal.run_pipeline(d, verbose=verbose)
def worker():
global error
while True:
(num, q) = pq.get()
if q is None or error is not None:
pq.task_done()
break
try:
process_one(q)
except Exception as e:
error = e
finally:
pq.task_done()
if max_workers > 1:
use_single_thread = False
pq = queue.PriorityQueue()
threads = []
for i in range(max_workers):
t = threading.Thread(target=worker)
t.start()
threads.append(t)
for t in tiles:
pq.put((i, t.copy()))
def stop_workers():
for i in range(len(threads)):
pq.put((-1, None))
for t in threads:
t.join()
# block until all tasks are done
try:
while pq.unfinished_tasks > 0:
time.sleep(0.5)
except KeyboardInterrupt:
print("CTRL+C terminating...")
stop_workers()
sys.exit(1)
stop_workers()
if error is not None:
# Try to reprocess using a single thread
# in case this was a memory error
log.ODM_WARNING("DEM processing failed with multiple threads, let's retry with a single thread...")
use_single_thread = True
else:
use_single_thread = True
if use_single_thread:
# Boring, single thread processing
for q in tiles:
process_one(q)
parallel_map(process_tile, tiles, max_workers)
output_file = "%s.tif" % dem_type
output_path = os.path.abspath(os.path.join(outdir, output_file))
@ -294,11 +236,15 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
run('gdalbuildvrt -resolution highest -r bilinear "%s" "%s" "%s"' % (merged_vrt_path, geotiff_small_filled_path, geotiff_tmp_path))
run('gdal_translate '
'-co NUM_THREADS={threads} '
'-co TILED=YES '
'-co COMPRESS=DEFLATE '
'--config GDAL_CACHEMAX {max_memory}% '
'{merged_vrt} {geotiff}'.format(**kwargs))
else:
run('gdal_translate '
'-co NUM_THREADS={threads} '
'-co TILED=YES '
'-co COMPRESS=DEFLATE '
'--config GDAL_CACHEMAX {max_memory}% '
'{tiles_vrt} {geotiff}'.format(**kwargs))

Wyświetl plik

@ -148,7 +148,7 @@ def run_pipeline(json, verbose=False):
f, jsonfile = tempfile.mkstemp(suffix='.json')
if verbose:
log.ODM_INFO('Pipeline file: %s' % jsonfile)
os.write(f, jsonlib.dumps(json))
os.write(f, jsonlib.dumps(json).encode('utf8'))
os.close(f)
cmd = [

Wyświetl plik

@ -2,7 +2,7 @@
def get_dem_vars(args):
return {
'TILED': 'YES',
'COMPRESS': 'LZW',
'COMPRESS': 'DEFLATE',
'BLOCKXSIZE': 512,
'BLOCKYSIZE': 512,
'BIGTIFF': 'IF_SAFER',

Wyświetl plik

@ -15,9 +15,9 @@ class GCPFile:
def read(self):
if self.exists():
with open(self.gcp_path, 'r') as f:
contents = f.read().decode('utf-8-sig').encode('utf-8').strip()
contents = f.read().strip()
lines = map(str.strip, contents.split('\n'))
lines = list(map(str.strip, contents.split('\n')))
if lines:
self.raw_srs = lines[0] # SRS
self.srs = location.parse_srs_header(self.raw_srs)
@ -104,7 +104,7 @@ class GCPFile:
if os.path.exists(gcp_file_output):
os.remove(gcp_file_output)
files = map(os.path.basename, glob.glob(os.path.join(images_dir, "*")))
files = list(map(os.path.basename, glob.glob(os.path.join(images_dir, "*"))))
output = [self.raw_srs]
files_found = 0

80
opendm/geo.py 100644
Wyświetl plik

@ -0,0 +1,80 @@
import os
from opendm import log
from opendm import location
from pyproj import CRS
class GeoFile:
    """Parser for an image geolocation file (camera center coordinates).

    Expected file format:
        line 1:  SRS header — ``EPSG:<code>`` or a ``+proj`` definition
        others:  image_name geo_x geo_y [geo_z] [omega phi kappa]
                 [horz_accuracy vert_accuracy] [extras...]

    All coordinates are converted to WGS84 (lat/long) on load.
    Parsed records are exposed via ``self.entries`` (filename -> GeoEntry)
    and ``get_entry``.
    """

    def __init__(self, geo_path):
        self.geo_path = geo_path
        self.entries = {}
        self.srs = None

        with open(self.geo_path, 'r') as f:
            contents = f.read().strip()

        lines = list(map(str.strip, contents.split('\n')))
        if lines:
            self.raw_srs = lines[0]  # SRS header (first line of the file)
            self.srs = location.parse_srs_header(self.raw_srs)
            longlat = CRS.from_epsg("4326")

            for line in lines[1:]:
                # Skip blank lines and '#' comments
                if line != "" and line[0] != "#":
                    parts = line.split()
                    if len(parts) >= 3:
                        # 'i' marks where optional trailing "extras" begin;
                        # it is advanced past each optional group found.
                        i = 3
                        filename = parts[0]
                        x, y = [float(p) for p in parts[1:3]]
                        z = float(parts[3]) if len(parts) >= 4 else None

                        # Always convert coordinates to WGS84
                        if z is not None:
                            x, y, z = location.transform3(self.srs, longlat, x, y, z)
                        else:
                            x, y = location.transform2(self.srs, longlat, x, y)

                        omega = phi = kappa = None
                        if len(parts) >= 7:
                            omega, phi, kappa = [float(p) for p in parts[4:7]]
                            i = 7

                        horizontal_accuracy = vertical_accuracy = None
                        if len(parts) >= 9:
                            horizontal_accuracy, vertical_accuracy = [float(p) for p in parts[7:9]]
                            i = 9

                        extras = " ".join(parts[i:])
                        self.entries[filename] = GeoEntry(filename, x, y, z,
                                                          omega, phi, kappa,
                                                          horizontal_accuracy, vertical_accuracy,
                                                          extras)
                    else:
                        # Fix: original called logger.warning(), but 'logger' is
                        # undefined in this module (NameError at runtime). Use the
                        # project's log module, consistent with the rest of the codebase.
                        log.ODM_WARNING("Malformed geo line: %s" % line)

    def get_entry(self, filename):
        """Return the GeoEntry for the given image filename, or None if absent."""
        return self.entries.get(filename)
class GeoEntry:
    """Immutable-ish record of one image's geolocation data.

    Holds the WGS84 camera position (x, y, z), optional orientation angles
    (omega, phi, kappa in degrees), optional accuracy estimates (meters),
    and any extra trailing fields from the source line.
    """

    def __init__(self, filename, x, y, z, omega=None, phi=None, kappa=None,
                 horizontal_accuracy=None, vertical_accuracy=None, extras=None):
        self.filename = filename
        self.x = x
        self.y = y
        self.z = z
        self.omega = omega
        self.phi = phi
        self.kappa = kappa
        self.horizontal_accuracy = horizontal_accuracy
        self.vertical_accuracy = vertical_accuracy
        self.extras = extras

    def __str__(self):
        # Render as: name (x y z) (omega phi kappa) (h_acc v_acc) extras
        # with any trailing whitespace removed (e.g. when extras is empty).
        fields = [
            self.filename,
            "({} {} {})".format(self.x, self.y, self.z),
            "({} {} {})".format(self.omega, self.phi, self.kappa),
            "({} {})".format(self.horizontal_accuracy, self.vertical_accuracy),
            str(self.extras),
        ]
        return " ".join(fields).rstrip()

    def position_string(self):
        """Return the camera position as a space-separated 'x y z' string."""
        return " ".join(str(v) for v in (self.x, self.y, self.z))

Wyświetl plik

@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
############################################################################
#

Wyświetl plik

@ -23,7 +23,7 @@ i.cutlinesmod.py --overwrite input=ortho output=cutline number_lines=${number_li
v.select ainput=cutline binput=crop_area output=result operator=within
# Export
v.out.ogr input=result output="result.gpkg" format=GPKG
v.out.ogr input=result output="result.gpkg" format=GPKG --overwrite
# Merge all geometries, select only the largest one (remove islands)
ogr2ogr -f GPKG -overwrite -explodecollections -dialect SQLite -sql "SELECT ST_Union(geom) FROM result ORDER BY ST_AREA(geom) DESC LIMIT 1" cutline.gpkg result.gpkg

Wyświetl plik

@ -6,6 +6,7 @@ import sys
import time
from opendm import log
from opendm import system
import locale
from string import Template
@ -94,18 +95,19 @@ class GrassContext:
log.ODM_INFO("Executing grass script from {}: {} --tmp-location {} --exec bash script.sh".format(self.get_cwd(), self.grass_binary, self.location))
env = os.environ.copy()
env["GRASS_ADDON_PATH"] = env.get("GRASS_ADDON_PATH", "") + os.path.abspath(os.path.join("opendm/grass/addons"))
env["LC_ALL"] = "C.UTF-8"
filename = os.path.join(self.get_cwd(), 'output.log')
with open(filename, 'wb') as writer, open(filename, 'rb', 1) as reader:
p = subprocess.Popen([self.grass_binary, '--tmp-location', self.location, '--exec', 'bash', 'script.sh'],
cwd=self.get_cwd(), stdout=subprocess.PIPE, stderr=writer, env=env)
while p.poll() is None:
sys.stdout.write(reader.read())
sys.stdout.write(reader.read().decode('utf8'))
time.sleep(0.5)
# Read the remaining
sys.stdout.write(reader.read())
sys.stdout.write(reader.read().decode('utf8'))
out, err = p.communicate()
out = out.decode('utf-8').strip()

Wyświetl plik

@ -2,19 +2,10 @@ import os
import shutil, errno
import json
def get_files_list(path_dir):
return os.listdir(path_dir)
def absolute_path_file(path_file):
return os.path.abspath(path_file)
def extract_file_from_path_file(path_file):
path, file = os.path.split(path_file)
return file
def extract_path_from_file(file):
path_file = os.path.abspath(os.path.dirname(file))
path, file = os.path.split(path_file)

Wyświetl plik

@ -20,15 +20,16 @@ def extract_utm_coords(photos, images_path, output_coords_file):
coords = []
reference_photo = None
for photo in photos:
if photo.latitude is None or photo.longitude is None or photo.altitude is None:
log.ODM_ERROR("Failed parsing GPS position for %s, skipping" % photo.filename)
if photo.latitude is None or photo.longitude is None:
log.ODM_WARNING("GPS position not available for %s" % photo.filename)
continue
if utm_zone is None:
utm_zone, hemisphere = get_utm_zone_and_hemisphere_from(photo.longitude, photo.latitude)
try:
coord = convert_to_utm(photo.longitude, photo.latitude, photo.altitude, utm_zone, hemisphere)
alt = photo.altitude if photo.altitude is not None else 0
coord = convert_to_utm(photo.longitude, photo.latitude, alt, utm_zone, hemisphere)
except:
raise Exception("Failed to convert GPS position to UTM for %s" % photo.filename)
@ -74,6 +75,8 @@ def proj_srs_convert(srs):
proj4 = srs.to_proj4()
res.ImportFromProj4(proj4)
res.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
return res
def transformer(from_srs, to_srs):

Wyświetl plik

@ -4,7 +4,7 @@ from opendm.dem import commands
from opendm import system
from opendm import log
from opendm import context
from scipy import signal, ndimage
from scipy import signal
import numpy as np
def create_25dmesh(inPointCloud, outMesh, dsm_radius=0.07, dsm_resolution=0.05, depth=8, samples=1, maxVertexCount=100000, verbose=False, available_cores=None, method='gridded', smooth_dsm=True):
@ -26,7 +26,7 @@ def create_25dmesh(inPointCloud, outMesh, dsm_radius=0.07, dsm_resolution=0.05,
inPointCloud,
'mesh_dsm',
output_type='max',
radiuses=map(str, radius_steps),
radiuses=list(map(str, radius_steps)),
gapfill=True,
outdir=tmp_directory,
resolution=dsm_resolution,

Wyświetl plik

@ -7,10 +7,11 @@ import math
import numpy as np
import rasterio
import fiona
from scipy import ndimage
from edt import edt
from rasterio.transform import Affine, rowcol
from rasterio.mask import mask
from opendm import io
from opendm.tiles.tiler import generate_orthophoto_tiles
def get_orthophoto_vars(args):
return {
@ -42,7 +43,7 @@ def generate_png(orthophoto_file):
'--config GDAL_CACHEMAX %s%% ' % (orthophoto_file, orthophoto_png, get_max_memory()))
def post_orthophoto_steps(args, bounds_file_path, orthophoto_file):
def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_tiles_dir):
if args.crop > 0:
Cropper.crop(bounds_file_path, orthophoto_file, get_orthophoto_vars(args), keep_original=not args.optimize_disk_space, warp_options=['-dstalpha'])
@ -52,6 +53,9 @@ def post_orthophoto_steps(args, bounds_file_path, orthophoto_file):
if args.orthophoto_png:
generate_png(orthophoto_file)
if args.tiles:
generate_orthophoto_tiles(orthophoto_file, orthophoto_tiles_dir, args.max_concurrency)
def compute_mask_raster(input_raster, vector_mask, output_raster, blend_distance=20, only_max_coords_feature=False):
if not os.path.exists(input_raster):
@ -87,7 +91,7 @@ def compute_mask_raster(input_raster, vector_mask, output_raster, blend_distance
if out_image.shape[0] >= 4:
# alpha_band = rast.dataset_mask()
alpha_band = out_image[-1]
dist_t = ndimage.distance_transform_edt(alpha_band)
dist_t = edt(alpha_band, black_border=True, parallel=0)
dist_t[dist_t <= blend_distance] /= blend_distance
dist_t[dist_t > blend_distance] = 1
np.multiply(alpha_band, dist_t, out=alpha_band, casting="unsafe")
@ -112,7 +116,7 @@ def feather_raster(input_raster, output_raster, blend_distance=20):
if blend_distance > 0:
if out_image.shape[0] >= 4:
alpha_band = out_image[-1]
dist_t = ndimage.distance_transform_edt(alpha_band)
dist_t = edt(alpha_band, black_border=True, parallel=0)
dist_t[dist_t <= blend_distance] /= blend_distance
dist_t[dist_t > blend_distance] = 1
np.multiply(alpha_band, dist_t, out=alpha_band, casting="unsafe")

Wyświetl plik

@ -18,8 +18,7 @@ class OSFMContext:
self.opensfm_project_path = opensfm_project_path
def run(self, command):
# Use Python 2.x by default, otherwise OpenSfM uses Python 3.x
system.run('/usr/bin/env python2 %s/bin/opensfm %s "%s"' %
system.run('/usr/bin/env python3 %s/bin/opensfm %s "%s"' %
(context.opensfm_path, command, self.opensfm_project_path))
def is_reconstruction_done(self):
@ -104,39 +103,77 @@ class OSFMContext:
use_bow = True
# GPSDOP override if we have GPS accuracy information (such as RTK)
override_gps_dop = 'gps_accuracy_is_set' in args
for p in photos:
if p.get_gps_dop() is not None:
override_gps_dop = True
break
if 'gps_accuracy_is_set' in args:
log.ODM_INFO("Forcing GPS DOP to %s for all images" % args.gps_accuracy)
if override_gps_dop:
log.ODM_INFO("Writing exif overrides")
exif_overrides = {}
for p in photos:
if 'gps_accuracy_is_set' in args:
log.ODM_INFO("Forcing GPS DOP to %s for all images" % args.gps_accuracy)
dop = args.gps_accuracy
elif p.get_gps_dop() is not None:
dop = p.get_gps_dop()
else:
log.ODM_INFO("Looks like we have RTK accuracy info for some photos. Good! We'll use it.")
dop = args.gps_accuracy # default value
exif_overrides = {}
for p in photos:
dop = args.gps_accuracy if 'gps_accuracy_is_set' in args else p.get_gps_dop()
if dop is not None and p.latitude is not None and p.longitude is not None:
exif_overrides[p.filename] = {
'gps': {
'latitude': p.latitude,
'longitude': p.longitude,
'altitude': p.altitude if p.altitude is not None else 0,
'dop': dop,
}
if p.latitude is not None and p.longitude is not None:
exif_overrides[p.filename] = {
'gps': {
'latitude': p.latitude,
'longitude': p.longitude,
'altitude': p.altitude if p.altitude is not None else 0,
'dop': dop,
}
}
with open(os.path.join(self.opensfm_project_path, "exif_overrides.json"), 'w') as f:
f.write(json.dumps(exif_overrides))
with open(os.path.join(self.opensfm_project_path, "exif_overrides.json"), 'w') as f:
f.write(json.dumps(exif_overrides))
# Check image masks
masks = []
for p in photos:
if p.mask is not None:
masks.append((p.filename, os.path.join(images_path, p.mask)))
if masks:
log.ODM_INFO("Found %s image masks" % len(masks))
with open(os.path.join(self.opensfm_project_path, "mask_list.txt"), 'w') as f:
for fname, mask in masks:
f.write("{} {}\n".format(fname, mask))
# Compute feature_process_size
feature_process_size = 2048 # default
if 'resize_to_is_set' in args:
# Legacy
log.ODM_WARNING("Legacy option --resize-to (this might be removed in a future version). Use --feature-quality instead.")
feature_process_size = int(args.resize_to)
else:
feature_quality_scale = {
'ultra': 1,
'high': 0.5,
'medium': 0.25,
'low': 0.125,
'lowest': 0.0675,
}
# Find largest photo dimension
max_dim = 0
for p in photos:
if p.width is None:
continue
max_dim = max(max_dim, max(p.width, p.height))
if max_dim > 0:
log.ODM_INFO("Maximum photo dimensions: %spx" % str(max_dim))
feature_process_size = int(max_dim * feature_quality_scale[args.feature_quality])
else:
log.ODM_WARNING("Cannot compute max image dimensions, going with defaults")
# create config file for OpenSfM
config = [
"use_exif_size: no",
"flann_algorithm: KDTREE", # more stable, faster than KMEANS
"feature_process_size: %s" % args.resize_to,
"feature_process_size: %s" % feature_process_size,
"feature_min_frames: %s" % args.min_num_features,
"processes: %s" % args.max_concurrency,
"matching_gps_neighbors: %s" % matcher_neighbors,
@ -317,7 +354,7 @@ def get_submodel_argv(args, submodels_path = None, submodel_name = None):
:return the same as argv, but removing references to --split,
setting/replacing --project-path and name
removing --rerun-from, --rerun, --rerun-all, --sm-cluster
removing --pc-las, --pc-csv, --pc-ept flags (processing these is wasteful)
removing --pc-las, --pc-csv, --pc-ept, --tiles flags (processing these is wasteful)
adding --orthophoto-cutline
adding --dem-euclidean-map
adding --skip-3dmodel (split-merge does not support 3D model merging)
@ -326,7 +363,7 @@ def get_submodel_argv(args, submodels_path = None, submodel_name = None):
reading the contents of --cameras
"""
assure_always = ['orthophoto_cutline', 'dem_euclidean_map', 'skip_3dmodel']
remove_always = ['split', 'split_overlap', 'rerun_from', 'rerun', 'gcp', 'end_with', 'sm_cluster', 'rerun_all', 'pc_csv', 'pc_las', 'pc_ept']
remove_always = ['split', 'split_overlap', 'rerun_from', 'rerun', 'gcp', 'end_with', 'sm_cluster', 'rerun_all', 'pc_csv', 'pc_las', 'pc_ept', 'tiles']
read_json_always = ['cameras']
argv = sys.argv

Wyświetl plik

@ -1,6 +1,6 @@
import io
import logging
import re
import os
import exifread
import numpy as np
@ -8,19 +8,21 @@ from six import string_types
from datetime import datetime, timedelta
import pytz
import log
import system
from opendm import io
from opendm import log
from opendm import system
import xmltodict as x2d
from opendm import get_image_size
from xml.parsers.expat import ExpatError
class ODM_Photo:
""" ODMPhoto - a class for ODMPhotos
"""
"""ODMPhoto - a class for ODMPhotos"""
def __init__(self, path_file):
self.filename = os.path.basename(path_file)
self.mask = None
# Standard tags (virtually all photos have these)
self.filename = io.extract_file_from_path_file(path_file)
self.width = None
self.height = None
self.camera_make = ''
@ -76,6 +78,19 @@ class ODM_Photo:
self.filename, self.camera_make, self.camera_model, self.width, self.height,
self.latitude, self.longitude, self.altitude, self.band_name, self.band_index)
    def set_mask(self, mask):
        """Attach an image mask to this photo.

        :param mask: mask image filename (relative to the images directory,
            per the `mask_list.txt` usage in osfm — TODO confirm), or None.
        """
        self.mask = mask
    def update_with_geo_entry(self, geo_entry):
        """Override this photo's EXIF-derived position, orientation and GPS
        accuracy values with those from a geolocation file (geo.txt) entry.

        :param geo_entry: parsed geo-file record exposing x/y/z coordinates,
            omega/phi/kappa angles and horizontal/vertical accuracy fields.
        """
        self.latitude = geo_entry.y
        self.longitude = geo_entry.x
        self.altitude = geo_entry.z
        # Orientation angles are mapped onto the DLS (sensor pose) fields.
        self.dls_yaw = geo_entry.omega
        self.dls_pitch = geo_entry.phi
        self.dls_roll = geo_entry.kappa
        self.gps_xy_stddev = geo_entry.horizontal_accuracy
        self.gps_z_stddev = geo_entry.vertical_accuracy
def parse_exif_values(self, _path_file):
# Disable exifread log
logging.getLogger('exifread').setLevel(logging.CRITICAL)
@ -85,13 +100,13 @@ class ODM_Photo:
try:
if 'Image Make' in tags:
try:
self.camera_make = tags['Image Make'].values.encode('utf8')
self.camera_make = tags['Image Make'].values
except UnicodeDecodeError:
log.ODM_WARNING("EXIF Image Make might be corrupted")
self.camera_make = "unknown"
if 'Image Model' in tags:
try:
self.camera_model = tags['Image Model'].values.encode('utf8')
self.camera_model = tags['Image Model'].values
except UnicodeDecodeError:
log.ODM_WARNING("EXIF Image Model might be corrupted")
self.camera_model = "unknown"
@ -129,7 +144,7 @@ class ODM_Photo:
if 'Image BitsPerSample' in tags:
self.bits_per_sample = self.int_value(tags['Image BitsPerSample'])
if 'EXIF DateTimeOriginal' in tags:
str_time = tags['EXIF DateTimeOriginal'].values.encode('utf8')
str_time = tags['EXIF DateTimeOriginal'].values
utc_time = datetime.strptime(str_time, "%Y:%m:%d %H:%M:%S")
subsec = 0
if 'EXIF SubSecTime' in tags:
@ -146,7 +161,7 @@ class ODM_Photo:
epoch = timezone.localize(datetime.utcfromtimestamp(0))
self.utc_time = (timezone.localize(utc_time) - epoch).total_seconds() * 1000.0
except Exception as e:
log.ODM_WARNING("Cannot read extended EXIF tags for %s: %s" % (_path_file, e.message))
log.ODM_WARNING("Cannot read extended EXIF tags for %s: %s" % (_path_file, str(e)))
# Extract XMP tags
@ -262,15 +277,15 @@ class ODM_Photo:
# From https://github.com/mapillary/OpenSfM/blob/master/opensfm/exif.py
def get_xmp(self, file):
img_str = str(file.read())
xmp_start = img_str.find('<x:xmpmeta')
xmp_end = img_str.find('</x:xmpmeta')
img_bytes = file.read()
xmp_start = img_bytes.find(b'<x:xmpmeta')
xmp_end = img_bytes.find(b'</x:xmpmeta')
if xmp_start < xmp_end:
xmp_str = img_str[xmp_start:xmp_end + 12]
xmp_str = img_bytes[xmp_start:xmp_end + 12].decode('utf8')
try:
xdict = x2d.parse(xmp_str)
except ExpatError:
except ExpatError as e:
from bs4 import BeautifulSoup
xmp_str = str(BeautifulSoup(xmp_str, 'xml'))
xdict = x2d.parse(xmp_str)
@ -297,7 +312,7 @@ class ODM_Photo:
def float_values(self, tag):
if isinstance(tag.values, list):
return map(lambda v: float(v.num) / float(v.den), tag.values)
return [float(v.num) / float(v.den) for v in tag.values]
else:
return [float(tag.values.num) / float(tag.values.den)]
@ -308,7 +323,7 @@ class ODM_Photo:
def int_values(self, tag):
if isinstance(tag.values, list):
return map(int, tag.values)
return [int(v) for v in tag.values]
else:
return [int(tag.values)]

Wyświetl plik

@ -10,13 +10,13 @@ from pipes import quote
def ply_info(input_ply):
if not os.path.exists(input_ply):
return False
raise IOError("%s does not exist" % input_ply)
# Read PLY header, check if point cloud has normals
has_normals = False
vertex_count = 0
with open(input_ply, 'r') as f:
with open(input_ply, 'r', errors='ignore') as f:
line = f.readline().strip().lower()
i = 0
while line != "end_header":
@ -251,10 +251,10 @@ def fast_merge_ply(input_point_cloud_files, output_file):
vertex_count = sum([ply_info(pcf)['vertex_count'] for pcf in input_point_cloud_files])
master_file = input_point_cloud_files[0]
with open(output_file, "wb") as out:
with open(master_file, "r") as fhead:
with open(master_file, "r", errors="ignore") as fhead:
# Copy header
line = fhead.readline()
out.write(line)
out.write(line.encode('utf8'))
i = 0
while line.strip().lower() != "end_header":
@ -262,9 +262,9 @@ def fast_merge_ply(input_point_cloud_files, output_file):
# Intercept element vertex field
if line.lower().startswith("element vertex "):
out.write("element vertex %s\n" % vertex_count)
else:
out.write(line)
out.write(("element vertex %s\n" % vertex_count).encode('utf8'))
else:
out.write(line.encode('utf8'))
i += 1
if i > 100:
@ -275,7 +275,7 @@ def fast_merge_ply(input_point_cloud_files, output_file):
with open(ipc, "rb") as fin:
# Skip header
line = fin.readline()
while line.strip().lower() != "end_header":
while line.strip().lower() != b"end_header":
line = fin.readline()
i += 1

Wyświetl plik

@ -32,8 +32,9 @@ class Broadcaster:
global_progress = 100
try:
sock.sendto("PGUP/{}/{}/{}".format(self.pid, self.project_name, float(global_progress)).encode('utf-8'), (UDP_IP, self.port))
except:
log.ODM_WARNING("Failed to broadcast progress update on UDP port %s" % str(self.port))
sock.sendto("PGUP/{}/{}/{}".format(self.pid, self.project_name, float(global_progress)).encode('utf8'),
(UDP_IP, self.port))
except Exception as e:
log.ODM_WARNING("Failed to broadcast progress update on UDP port %s (%s)" % (str(self.port), str(e)))
progressbc = Broadcaster(PROGRESS_BROADCAST_PORT)

Wyświetl plik

@ -20,6 +20,7 @@ def add_pseudo_georeferencing(geotiff):
dst_ds = gdal.Open(geotiff, GA_Update)
srs = osr.SpatialReference()
srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
srs.ImportFromProj4(get_pseudogeo_utm())
dst_ds.SetProjection( srs.ExportToWkt() )
dst_ds.SetGeoTransform( [ 0.0, get_pseudogeo_scale(), 0.0, 0.0, 0.0, -get_pseudogeo_scale() ] )

Wyświetl plik

@ -12,7 +12,7 @@ from opendm import config
from pyodm import Node, exceptions
from pyodm.utils import AtomicCounter
from pyodm.types import TaskStatus
from osfm import OSFMContext, get_submodel_args_dict, get_submodel_argv
from opendm.osfm import OSFMContext, get_submodel_args_dict, get_submodel_argv
from pipes import quote
try:
@ -474,7 +474,7 @@ class ToolchainTask(Task):
argv = get_submodel_argv(config.config(), submodels_path, submodel_name)
# Re-run the ODM toolchain on the submodel
system.run(" ".join(map(quote, argv)), env_vars=os.environ.copy())
system.run(" ".join(map(quote, map(str, argv))), env_vars=os.environ.copy())
# This will only get executed if the command above succeeds
self.touch(completed_file)

Wyświetl plik

@ -53,7 +53,7 @@ def sighandler(signum, frame):
signal.signal(signal.SIGINT, sighandler)
signal.signal(signal.SIGTERM, sighandler)
def run(cmd, env_paths=[context.superbuild_bin_path], env_vars={}):
def run(cmd, env_paths=[context.superbuild_bin_path], env_vars={}, packages_paths=context.python_packages_paths):
"""Run a system command"""
global running_subprocesses
@ -63,6 +63,9 @@ def run(cmd, env_paths=[context.superbuild_bin_path], env_vars={}):
if len(env_paths) > 0:
env["PATH"] = env["PATH"] + ":" + ":".join(env_paths)
if len(packages_paths) > 0:
env["PYTHONPATH"] = env.get("PYTHONPATH", "") + ":" + ":".join(packages_paths)
for k in env_vars:
env[k] = str(env_vars[k])

Wyświetl plik

@ -0,0 +1,12 @@
0% 255 0 255
10% 128 0 255
20% 0 0 255
30% 0 128 255
40% 0 255 255
50% 0 255 128
60% 0 255 0
70% 128 255 0
80% 255 255 0
90% 255 128 0
100% 255 0 0
nv 0 0 0 0

Plik diff jest za duży Load Diff

Wyświetl plik

@ -0,0 +1,234 @@
#!/usr/bin/env python
#******************************************************************************
# $Id$
#
# Project: GDAL Python Interface
# Purpose: Script to merge greyscale as intensity into an RGB(A) image, for
# instance to apply hillshading to a dem colour relief.
# Author: Frank Warmerdam, warmerdam@pobox.com
# Trent Hare (USGS)
#
#******************************************************************************
# Copyright (c) 2009, Frank Warmerdam
# Copyright (c) 2010, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#******************************************************************************
import sys
import numpy
from osgeo import gdal
# =============================================================================
# rgb_to_hsv()
#
# rgb comes in as [r,g,b] with values in the range [0,255]. The returned
# hsv values will be with hue and saturation in the range [0,1] and value
# in the range [0,255]
#
def rgb_to_hsv( r,g,b ):
    """Convert R, G, B planes (values in [0,255]) to an HSV stack.

    Returns a numpy array [h, s, v] where hue and saturation are in [0,1]
    and value keeps the input range [0,255].
    """
    brightest = numpy.maximum(r, numpy.maximum(g, b))
    darkest = numpy.minimum(r, numpy.minimum(g, b))
    v = brightest

    # For grey pixels (min == max) the chroma spread is zero; substitute 1
    # so the divisions below never divide by zero.
    ones = numpy.ones((r.shape[0], r.shape[1]))
    grey = numpy.equal(darkest, brightest)
    spread = numpy.where(grey, ones, brightest - darkest)

    s = (brightest - darkest) / numpy.maximum(ones, brightest)

    rc = (brightest - r) / spread
    gc = (brightest - g) / spread
    bc = (brightest - b) / spread

    # Pick the hue sector from whichever channel is dominant; later
    # selections override earlier ones, exactly like the classic formula.
    h = numpy.zeros((r.shape[0], r.shape[1]))
    h = numpy.where(numpy.equal(brightest, b), 4.0 + gc - rc, h)
    h = numpy.where(numpy.equal(brightest, g), 2.0 + rc - bc, h)
    h = numpy.where(numpy.equal(brightest, r), bc - gc, h)
    h = numpy.mod(h / 6.0, 1.0)

    return numpy.asarray([h, s, v])
# =============================================================================
# hsv_to_rgb()
#
# hsv comes in as [h,s,v] with hue and saturation in the range [0,1],
# but value in the range [0,255].
def hsv_to_rgb( hsv ):
    """Convert an [h, s, v] stack (hue/saturation in [0,1], value in
    [0,255]) back to an RGB stack of dtype uint8.
    """
    hue = hsv[0]
    sat = hsv[1]
    val = hsv[2]

    # Hue sector 0..5 and the fractional position within the sector.
    sector = (hue * 6.0).astype(int)
    frac = (hue * 6.0) - sector

    p = val * (1.0 - sat)
    q = val * (1.0 - sat * frac)
    t = val * (1.0 - sat * (1.0 - frac))

    # Each channel cycles through the same six values, phase-shifted.
    red = numpy.choose(sector, [val, q, p, p, t, val])
    green = numpy.choose(sector, [t, val, val, q, p, p])
    blue = numpy.choose(sector, [p, p, t, val, val, q])

    return numpy.asarray([red, green, blue]).astype(numpy.uint8)
# =============================================================================
# Usage()
def Usage():
    """Print the command-line help text for hsv_merge.py, then exit(1)."""
    help_text = """Usage: hsv_merge.py [-q] [-of format] src_color src_greyscale dst_color

where src_color is a RGB or RGBA dataset,
      src_greyscale is a greyscale dataset (e.g. the result of gdaldem hillshade)
      dst_color will be a RGB or RGBA dataset using the greyscale as the
      intensity for the color dataset.
"""
    print(help_text)
    sys.exit(1)
# =============================================================================
# Mainline
# =============================================================================
# Let GDAL consume its own general options (e.g. --config) before we parse.
argv = gdal.GeneralCmdLineProcessor( sys.argv )
if argv is None:
    sys.exit( 0 )

# Defaults: GeoTIFF output; the three filenames are positional arguments.
format = 'GTiff'
src_color_filename = None
src_greyscale_filename = None
dst_color_filename = None
quiet = False

# Parse command line arguments.
i = 1
while i < len(argv):
    arg = argv[i]

    if arg == '-of':
        i = i + 1
        format = argv[i]

    elif arg == '-q' or arg == '-quiet':
        quiet = True

    elif src_color_filename is None:
        src_color_filename = argv[i]

    elif src_greyscale_filename is None:
        src_greyscale_filename = argv[i]

    elif dst_color_filename is None:
        dst_color_filename = argv[i]

    else:
        Usage()

    i = i + 1

# All three positional filenames are mandatory.
if dst_color_filename is None:
    Usage()

datatype = gdal.GDT_Byte

hilldataset = gdal.Open( src_greyscale_filename, gdal.GA_ReadOnly )
colordataset = gdal.Open( src_color_filename, gdal.GA_ReadOnly )

#check for 3 or 4 bands in the color file
if (colordataset.RasterCount != 3 and colordataset.RasterCount != 4):
    print('Source image does not appear to have three or four bands as required.')
    sys.exit(1)

#define output format, name, size, type and set projection
# Geo-referencing (projection + transform) is copied from the hillshade.
out_driver = gdal.GetDriverByName(format)
outdataset = out_driver.Create(dst_color_filename, colordataset.RasterXSize, \
    colordataset.RasterYSize, colordataset.RasterCount, datatype)
outdataset.SetProjection(hilldataset.GetProjection())
outdataset.SetGeoTransform(hilldataset.GetGeoTransform())

#assign RGB and hillshade bands
rBand = colordataset.GetRasterBand(1)
gBand = colordataset.GetRasterBand(2)
bBand = colordataset.GetRasterBand(3)
if colordataset.RasterCount == 4:
    aBand = colordataset.GetRasterBand(4)
else:
    aBand = None

hillband = hilldataset.GetRasterBand(1)
hillbandnodatavalue = hillband.GetNoDataValue()

#check for same file size
if ((rBand.YSize != hillband.YSize) or (rBand.XSize != hillband.XSize)):
    print('Color and hillshade must be the same size in pixels.')
    sys.exit(1)

#loop over lines to apply hillshade
# Scanline-by-scanline processing keeps memory use constant for any raster size.
for i in range(hillband.YSize):
    #load RGB and Hillshade arrays
    rScanline = rBand.ReadAsArray(0, i, hillband.XSize, 1, hillband.XSize, 1)
    gScanline = gBand.ReadAsArray(0, i, hillband.XSize, 1, hillband.XSize, 1)
    bScanline = bBand.ReadAsArray(0, i, hillband.XSize, 1, hillband.XSize, 1)
    hillScanline = hillband.ReadAsArray(0, i, hillband.XSize, 1, hillband.XSize, 1)

    #convert to HSV
    hsv = rgb_to_hsv( rScanline, gScanline, bScanline )

    # if there's nodata on the hillband, use the v value from the color
    # dataset instead of the hillshade value.
    if hillbandnodatavalue is not None:
        equal_to_nodata = numpy.equal(hillScanline, hillbandnodatavalue)
        v = numpy.choose(equal_to_nodata,(hillScanline,hsv[2]))
    else:
        v = hillScanline

    #replace v with hillshade
    hsv_adjusted = numpy.asarray( [hsv[0], hsv[1], v] )

    #convert back to RGB
    dst_color = hsv_to_rgb( hsv_adjusted )

    #write out new RGB bands to output one band at a time
    outband = outdataset.GetRasterBand(1)
    outband.WriteArray(dst_color[0], 0, i)
    outband = outdataset.GetRasterBand(2)
    outband.WriteArray(dst_color[1], 0, i)
    outband = outdataset.GetRasterBand(3)
    outband.WriteArray(dst_color[2], 0, i)

    # Pass the alpha band through untouched when the source has one.
    if aBand is not None:
        aScanline = aBand.ReadAsArray(0, i, hillband.XSize, 1, hillband.XSize, 1)
        outband = outdataset.GetRasterBand(4)
        outband.WriteArray(aScanline, 0, i)

    #update progress line
    if not quiet:
        gdal.TermProgress_nocb( (float(i+1) / hillband.YSize) )

Wyświetl plik

@ -0,0 +1,34 @@
import os
from opendm import log
from opendm import system
from opendm import io
def generate_tiles(geotiff, output_dir, max_concurrency):
    """Render an XYZ tile pyramid (zoom levels 5-21) from a GeoTIFF.

    Delegates to the bundled gdal2tiles.py script, using max_concurrency
    worker processes.
    """
    script = os.path.join(os.path.dirname(__file__), "gdal2tiles.py")
    cmd = 'python3 "%s" --processes %s -z 5-21 -n -w none "%s" "%s"' % (script, max_concurrency, geotiff, output_dir)
    system.run(cmd)
def generate_orthophoto_tiles(geotiff, output_dir, max_concurrency):
    """Generate map tiles for an orthophoto, logging (not raising) on failure."""
    try:
        generate_tiles(geotiff, output_dir, max_concurrency)
    except Exception as err:
        # Tiles are an optional output; a failure must not abort the pipeline.
        log.ODM_WARNING("Cannot generate orthophoto tiles: %s" % str(err))
def generate_dem_tiles(geotiff, output_dir, max_concurrency):
    """Generate colored-hillshade map tiles for a DEM GeoTIFF.

    Pipeline: gdaldem color-relief -> gdaldem hillshade -> HSV merge of the
    two -> tile pyramid. Intermediate rasters are removed afterwards.
    Failures are logged as warnings, not raised.
    """
    here = os.path.dirname(__file__)
    relief_file = os.path.join(here, "color_relief.txt")
    hsv_merge_script = os.path.join(here, "hsv_merge.py")

    colored_dem = io.related_file_path(geotiff, postfix="color")
    hillshade_dem = io.related_file_path(geotiff, postfix="hillshade")
    colored_hillshade_dem = io.related_file_path(geotiff, postfix="colored_hillshade")
    intermediates = (colored_dem, hillshade_dem, colored_hillshade_dem)

    try:
        system.run('gdaldem color-relief "%s" "%s" "%s" -alpha -co ALPHA=YES' % (geotiff, relief_file, colored_dem))
        system.run('gdaldem hillshade "%s" "%s" -z 1.0 -s 1.0 -az 315.0 -alt 45.0' % (geotiff, hillshade_dem))
        system.run('python3 "%s" "%s" "%s" "%s"' % (hsv_merge_script, colored_dem, hillshade_dem, colored_hillshade_dem))
        generate_tiles(colored_hillshade_dem, output_dir, max_concurrency)

        # Cleanup: the intermediate rasters only exist to build the tiles.
        for path in intermediates:
            if os.path.isfile(path):
                os.remove(path)
    except Exception as err:
        log.ODM_WARNING("Cannot generate DEM tiles: %s" % str(err))

Wyświetl plik

@ -8,11 +8,11 @@ from pyproj import CRS
import xmltodict as x2d
from six import string_types
import log
import io
import system
import context
import logging
from opendm import log
from opendm import io
from opendm import system
from opendm import context
from opendm.progress import progressbc
from opendm.photo import ODM_Photo
@ -22,6 +22,7 @@ class ODM_Reconstruction(object):
self.photos = photos
self.georef = None
self.gcp = None
self.geo_file = None
self.multi_camera = self.detect_multi_camera()
def detect_multi_camera(self):
@ -200,7 +201,7 @@ class ODM_GeoRef(object):
class ODM_Tree(object):
def __init__(self, root_path, gcp_file = None):
def __init__(self, root_path, gcp_file = None, geo_file = None):
# root path to the project
self.root_path = io.absolute_path_file(root_path)
self.input_images = io.join_paths(self.root_path, 'images')
@ -265,6 +266,8 @@ class ODM_Tree(object):
self.odm_georeferencing, 'coords.txt')
self.odm_georeferencing_gcp = gcp_file or io.find('gcp_list.txt', self.root_path)
self.odm_georeferencing_gcp_utm = io.join_paths(self.odm_georeferencing, 'gcp_list_utm.txt')
self.odm_geo_file = geo_file or io.find('geo.txt', self.root_path)
self.odm_georeferencing_utm_log = io.join_paths(
self.odm_georeferencing, 'odm_georeferencing_utm_log.txt')
self.odm_georeferencing_log = 'odm_georeferencing_log.txt'
@ -290,6 +293,9 @@ class ODM_Tree(object):
self.odm_orthophoto_log = io.join_paths(self.odm_orthophoto, 'odm_orthophoto_log.txt')
self.odm_orthophoto_tif_log = io.join_paths(self.odm_orthophoto, 'gdal_translate_log.txt')
# tiles
self.orthophoto_tiles = io.join_paths(self.root_path, "orthophoto_tiles")
# Split-merge
self.submodels_path = io.join_paths(self.root_path, 'submodels')

Wyświetl plik

@ -1,93 +1,18 @@
FROM phusion/baseimage:0.10.2 as base
FROM ubuntu:18.04
# Env variables
ENV DEBIAN_FRONTEND noninteractive
ENV PYTHONPATH "$PYTHONPATH:/code/SuperBuild/install/lib/python3.6/dist-packages"
ENV PYTHONPATH "$PYTHONPATH:/code/SuperBuild/src/opensfm"
ENV LD_LIBRARY_PATH "$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"
#Install dependencies and required requisites
RUN add-apt-repository -y ppa:ubuntugis/ubuntugis-unstable \
&& add-apt-repository -y ppa:george-edison55/cmake-3.x \
&& apt-get update -y \
&& apt-get install --no-install-recommends -y \
build-essential \
cmake \
gdal-bin \
git \
libatlas-base-dev \
libavcodec-dev \
libavformat-dev \
libboost-date-time-dev \
libboost-filesystem-dev \
libboost-iostreams-dev \
libboost-log-dev \
libboost-python-dev \
libboost-regex-dev \
libboost-thread-dev \
libeigen3-dev \
libflann-dev \
libgdal-dev \
libgeotiff-dev \
libgoogle-glog-dev \
libgtk2.0-dev \
libjasper-dev \
libjpeg-dev \
libjsoncpp-dev \
liblapack-dev \
liblas-bin \
libpng-dev \
libproj-dev \
libsuitesparse-dev \
libswscale-dev \
libtbb2 \
libtbb-dev \
libtiff-dev \
libvtk6-dev \
libxext-dev \
python-dev \
python-gdal \
python-matplotlib \
python-pip \
python-software-properties \
python-wheel \
software-properties-common \
swig2.0 \
grass-core \
libssl-dev \
&& apt-get remove libdc1394-22-dev \
&& pip install --upgrade pip \
&& pip install setuptools
# Prepare directories
WORKDIR /code
# Copy everything
COPY . ./
RUN pip install -r requirements.txt \
&& pip install --upgrade cryptography \
&& python -m easy_install --upgrade pyOpenSSL
ENV PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python2.7/dist-packages"
ENV PYTHONPATH="$PYTHONPATH:/code/SuperBuild/src/opensfm"
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"
# Replace g++ and gcc with our own scripts
COPY /docker/ /code/docker/
RUN mv -v /usr/bin/gcc /usr/bin/gcc_real \
&& mv -v /usr/bin/g++ /usr/bin/g++_real \
&& cp -v /code/docker/gcc /usr/bin/gcc \
&& cp -v /code/docker/g++ /usr/bin/g++
# Compile code in SuperBuild and root directories
RUN cd SuperBuild \
&& mkdir build \
&& cd build \
&& cmake .. \
&& make -j$(nproc) \
&& cd ../.. \
&& mkdir build \
&& cd build \
&& cmake .. \
&& make -j$(nproc)
RUN PORTABLE_INSTALL=YES bash configure.sh install
# Cleanup APT
RUN apt-get clean \
@ -105,5 +30,5 @@ RUN rm -rf \
/code/SuperBuild/src/pdal
# Entry point
ENTRYPOINT ["python", "/code/run.py"]
ENTRYPOINT ["python3", "/code/run.py"]

Wyświetl plik

@ -13,7 +13,7 @@ pyodm==1.5.4
Pillow==6.1.0
networkx==2.2
scipy==1.2.1
numpy==1.15.4
numpy==1.19.2
pyproj==2.2.2
Pysolar==0.6
psutil==5.6.3
@ -25,3 +25,5 @@ scikit-learn==0.20
laspy==1.6.0
beautifulsoup4==4.9.1
lxml==4.5.1
matplotlib==1.5.1
edt==2.0.1

10
run.py
Wyświetl plik

@ -1,4 +1,10 @@
#!/usr/bin/python
#!/usr/bin/python3
# Basic check
import sys
if sys.version_info.major < 3:
print("Ups! ODM needs to run with Python 3. It seems you launched it with Python 2. Try using: python3 run.py ... ")
sys.exit(1)
from opendm import log
from opendm import config
@ -104,4 +110,4 @@ if __name__ == '__main__':
log.ODM_INFO('MMMMMMMMMMMN- smNm/ +MMm :NNdo` .mMM` oMM+/yMM/ MMMMMMMMMMMM')
log.ODM_INFO('MMMMMMMMMMMMNo- `:yMMMm `:sNMMM` sMMMMMMM+ NMMMMMMMMMMM')
log.ODM_INFO('MMMMMMMMMMMMMMMNmmNMMMMMMMNmmmmNMMMMMMMNNMMMMMMMMMNNMMMMMMMMMMMM')
log.ODM_INFO('OpenDroneMap app finished - %s' % system.now())
log.ODM_INFO('ODM app finished - %s' % system.now())

3
run.sh
Wyświetl plik

@ -1,7 +1,6 @@
#!/bin/bash
RUNPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export PYTHONPATH=$RUNPATH/SuperBuild/install/lib/python2.7/dist-packages:$RUNPATH/SuperBuild/src/opensfm:$PYTHONPATH
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$RUNPATH/SuperBuild/install/lib
python $RUNPATH/run.py "$@"
python3 $RUNPATH/run.py "$@"

Wyświetl plik

@ -6,13 +6,14 @@ from opendm import io
from opendm import types
from opendm import log
from opendm import system
from opendm.geo import GeoFile
from shutil import copyfile
from opendm import progress
def save_images_database(photos, database_file):
with open(database_file, 'w') as f:
f.write(json.dumps(map(lambda p: p.__dict__, photos)))
f.write(json.dumps([p.__dict__ for p in photos]))
log.ODM_INFO("Wrote images database: %s" % database_file)
@ -39,7 +40,7 @@ def load_images_database(database_file):
class ODMLoadDatasetStage(types.ODM_Stage):
def process(self, args, outputs):
tree = types.ODM_Tree(args.project_path, args.gcp)
tree = types.ODM_Tree(args.project_path, args.gcp, args.geo)
outputs['tree'] = tree
if args.time and io.file_exists(tree.benchmarking):
@ -48,26 +49,38 @@ class ODMLoadDatasetStage(types.ODM_Stage):
with open(tree.benchmarking, 'a') as b:
b.write('ODM Benchmarking file created %s\nNumber of Cores: %s\n\n' % (system.now(), context.num_cores))
# check if the extension is supported
def supported_extension(file_name):
(pathfn, ext) = os.path.splitext(file_name)
return ext.lower() in context.supported_extensions
# check if the image filename is supported
def valid_image_filename(filename):
(pathfn, ext) = os.path.splitext(filename)
return ext.lower() in context.supported_extensions and pathfn[-5:] != "_mask"
# Get supported images from dir
def get_images(in_dir):
# filter images for its extension type
log.ODM_DEBUG(in_dir)
return [f for f in io.get_files_list(in_dir) if supported_extension(f)]
entries = os.listdir(in_dir)
valid, rejects = [], []
for f in entries:
if valid_image_filename(f):
valid.append(f)
else:
rejects.append(f)
return valid, rejects
def find_mask(photo_path, masks):
(pathfn, ext) = os.path.splitext(os.path.basename(photo_path))
k = "{}_mask".format(pathfn)
mask = masks.get(k)
if mask:
# Spaces are not supported due to OpenSfM's mask_list.txt format reqs
if not " " in mask:
return mask
else:
log.ODM_WARNING("Image mask {} has a space. Spaces are currently not supported for image masks.".format(mask))
# get images directory
input_dir = tree.input_images
images_dir = tree.dataset_raw
if not io.dir_exists(images_dir):
log.ODM_INFO("Project directory %s doesn't exist. Creating it now. " % images_dir)
system.mkdir_p(images_dir)
copied = [copyfile(io.join_paths(input_dir, f), io.join_paths(images_dir, f)) for f in get_images(input_dir)]
# define paths and create working directories
system.mkdir_p(tree.odm_georeferencing)
if not args.use_3dmesh: system.mkdir_p(tree.odm_25dgeoreferencing)
@ -77,18 +90,39 @@ class ODMLoadDatasetStage(types.ODM_Stage):
# check if we rerun cell or not
images_database_file = io.join_paths(tree.root_path, 'images.json')
if not io.file_exists(images_database_file) or self.rerun():
files = get_images(images_dir)
files, rejects = get_images(images_dir)
if files:
# create ODMPhoto list
path_files = [io.join_paths(images_dir, f) for f in files]
# Lookup table for masks
masks = {}
for r in rejects:
(p, ext) = os.path.splitext(r)
if p[-5:] == "_mask" and ext.lower() in context.supported_extensions:
masks[p] = r
photos = []
with open(tree.dataset_list, 'w') as dataset_list:
log.ODM_INFO("Loading %s images" % len(path_files))
for f in path_files:
photos += [types.ODM_Photo(f)]
p = types.ODM_Photo(f)
p.set_mask(find_mask(f, masks))
photos += [p]
dataset_list.write(photos[-1].filename + '\n')
# Check if a geo file is available
if tree.odm_geo_file is not None and os.path.exists(tree.odm_geo_file):
log.ODM_INFO("Found image geolocation file")
gf = GeoFile(tree.odm_geo_file)
updated = 0
for p in photos:
entry = gf.get_entry(p.filename)
if entry:
p.update_with_geo_entry(entry)
updated += 1
log.ODM_INFO("Updated %s image positions" % updated)
# Save image database for faster restart
save_images_database(photos, images_database_file)
else:

Wyświetl plik

@ -77,7 +77,8 @@ class ODMMveStage(types.ODM_Stage):
self.update_progress(90)
scene2pset_config = [
"-F%s" % mve_output_scale
"-F%s" % mve_output_scale,
'-mmask'
]
# run scene2pset

Wyświetl plik

@ -6,19 +6,19 @@ from opendm import io
from opendm import system
from opendm import log
from dataset import ODMLoadDatasetStage
from run_opensfm import ODMOpenSfMStage
from mve import ODMMveStage
from odm_slam import ODMSlamStage
from odm_meshing import ODMeshingStage
from mvstex import ODMMvsTexStage
from odm_georeferencing import ODMGeoreferencingStage
from odm_orthophoto import ODMOrthoPhotoStage
from odm_dem import ODMDEMStage
from odm_filterpoints import ODMFilterPoints
from splitmerge import ODMSplitStage, ODMMergeStage
from odm_report import ODMReport
from stages.dataset import ODMLoadDatasetStage
from stages.run_opensfm import ODMOpenSfMStage
from stages.mve import ODMMveStage
from stages.odm_slam import ODMSlamStage
from stages.odm_meshing import ODMeshingStage
from stages.mvstex import ODMMvsTexStage
from stages.odm_georeferencing import ODMGeoreferencingStage
from stages.odm_orthophoto import ODMOrthoPhotoStage
from stages.odm_dem import ODMDEMStage
from stages.odm_filterpoints import ODMFilterPoints
from stages.splitmerge import ODMSplitStage, ODMMergeStage
from stages.odm_report import ODMReport
class ODMApp:
def __init__(self, args):

Wyświetl plik

@ -10,6 +10,7 @@ from opendm import gsd
from opendm.dem import commands, utils
from opendm.cropper import Cropper
from opendm import pseudogeo
from opendm.tiles.tiler import generate_dem_tiles
class ODMDEMStage(types.ODM_Stage):
def process(self, args, outputs):
@ -101,7 +102,7 @@ class ODMDEMStage(types.ODM_Stage):
dem_input,
product,
output_type='idw' if product == 'dtm' else 'max',
radiuses=map(str, radius_steps),
radiuses=list(map(str, radius_steps)),
gapfill=args.dem_gapfill_steps > 0,
outdir=odm_dem_root,
resolution=resolution / 100.0,
@ -128,9 +129,12 @@ class ODMDEMStage(types.ODM_Stage):
commands.compute_euclidean_map(unfilled_dem_path,
io.related_file_path(dem_geotiff_path, postfix=".euclideand"),
overwrite=True)
if pseudo_georeference:
pseudogeo.add_pseudo_georeferencing(dem_geotiff_path)
if args.tiles:
generate_dem_tiles(dem_geotiff_path, tree.path("%s_tiles" % product), args.max_concurrency)
progress += 30
self.update_progress(progress)

Wyświetl plik

@ -120,7 +120,12 @@ class ODMGeoreferencingStage(types.ODM_Stage):
log.ODM_INFO("Calculating cropping area and generating bounds shapefile from point cloud")
cropper = Cropper(tree.odm_georeferencing, 'odm_georeferenced_model')
decimation_step = 40 if args.fast_orthophoto or args.use_opensfm_dense else 90
if args.fast_orthophoto:
decimation_step = 10
elif args.use_opensfm_dense:
decimation_step = 40
else:
decimation_step = 90
# More aggressive decimation for large datasets
if not args.fast_orthophoto:

Wyświetl plik

@ -148,7 +148,7 @@ class ODMOrthoPhotoStage(types.ODM_Stage):
os.path.join(tree.odm_orthophoto, "odm_orthophoto_cut.tif"),
blend_distance=20, only_max_coords_feature=True)
orthophoto.post_orthophoto_steps(args, bounds_file_path, tree.odm_orthophoto_tif)
orthophoto.post_orthophoto_steps(args, bounds_file_path, tree.odm_orthophoto_tif, tree.orthophoto_tiles)
# Generate feathered orthophoto also
if args.orthophoto_cutline:

Wyświetl plik

@ -18,7 +18,7 @@ from opendm.remote import LocalRemoteExecutor
from opendm.shots import merge_geojson_shots
from opendm import point_cloud
from pipes import quote
from opendm.tiles.tiler import generate_dem_tiles
class ODMSplitStage(types.ODM_Stage):
def process(self, args, outputs):
@ -157,9 +157,9 @@ class ODMSplitStage(types.ODM_Stage):
#Create image lists
with open(path+"/opensfm/image_list.txt", "w") as o:
o.writelines(map(lambda x: "../images/"+x+'\n', v["shots"].keys()))
o.writelines(list(map(lambda x: "../images/"+x+'\n', v["shots"].keys())))
with open(path+"/img_list.txt", "w") as o:
o.writelines(map(lambda x: x+'\n', v["shots"].keys()))
o.writelines(list(map(lambda x: x+'\n', v["shots"].keys())))
i+=1
os.rename(octx.path("../submodels"), octx.path("../unaligned_submodels"))
@ -216,7 +216,7 @@ class ODMSplitStage(types.ODM_Stage):
argv = get_submodel_argv(args, tree.submodels_path, sp_octx.name())
# Re-run the ODM toolchain on the submodel
system.run(" ".join(map(quote, argv)), env_vars=os.environ.copy())
system.run(" ".join(map(quote, map(str, argv))), env_vars=os.environ.copy())
else:
lre.set_projects([os.path.abspath(os.path.join(p, "..")) for p in submodel_paths])
lre.run_toolchain()
@ -293,7 +293,7 @@ class ODMMergeStage(types.ODM_Stage):
orthophoto_vars = orthophoto.get_orthophoto_vars(args)
orthophoto.merge(all_orthos_and_ortho_cuts, tree.odm_orthophoto_tif, orthophoto_vars)
orthophoto.post_orthophoto_steps(args, merged_bounds_file, tree.odm_orthophoto_tif)
orthophoto.post_orthophoto_steps(args, merged_bounds_file, tree.odm_orthophoto_tif, tree.orthophoto_tiles)
elif len(all_orthos_and_ortho_cuts) == 1:
# Simply copy
log.ODM_WARNING("A single orthophoto/cutline pair was found between all submodels.")
@ -331,8 +331,12 @@ class ODMMergeStage(types.ODM_Stage):
if args.crop > 0:
Cropper.crop(merged_bounds_file, dem_file, dem_vars, keep_original=not args.optimize_disk_space)
log.ODM_INFO("Created %s" % dem_file)
if args.tiles:
generate_dem_tiles(dem_file, tree.path("%s_tiles" % human_name.lower()), args.max_concurrency)
else:
log.ODM_WARNING("Cannot merge %s, %s was not created" % (human_name, dem_file))
else:
log.ODM_WARNING("Found merged %s in %s" % (human_name, dem_filename))

Wyświetl plik

@ -11,8 +11,9 @@ if [ "$1" = "--setup" ]; then
#bash configure.sh reinstall
touch .setupdevenv
apt install -y vim
chown -R $3:$4 /code /var/www
apt update && apt install -y vim
chown -R $3:$4 /code
chown -R $3:$4 /var/www
fi
echo "Adding $2 to /etc/passwd"
@ -44,7 +45,13 @@ if [ "$1" = "--setup" ]; then
# Colors
echo "alias ls='ls --color=auto'" >> $HOME/.bashrc
su -c bash $2
# Python paths
echo $(python /code/opendm/context.py) >> $HOME/.bashrc
# Misc aliases
echo "alias pdal=/code/SuperBuild/install/bin/pdal" >> $HOME/.bashrc
su -c bash $2
exit 0
fi
@ -75,6 +82,7 @@ fi
export PORT="${PORT:=3000}"
export QTC="${QTC:=NO}"
export IMAGE="${IMAGE:=opendronemap/nodeodm}"
if [ -z "$DATA" ]; then
echo "Usage: DATA=/path/to/datasets [VARS] $0"
@ -82,6 +90,7 @@ if [ -z "$DATA" ]; then
echo "VARS:"
echo " DATA Path to directory that contains datasets for testing. The directory will be mounted in /datasets. If you don't have any, simply set it to a folder outside the ODM repository."
echo " PORT Port to expose for NodeODM (default: $PORT)"
echo " IMAGE Docker image to use (default: $IMAGE)"
echo " QTC When set to YES, installs QT Creator for C++ development (default: $QTC)"
exit 1
fi
@ -89,8 +98,9 @@ fi
echo "Starting development environment..."
echo "Datasets path: $DATA"
echo "NodeODM port: $PORT"
echo "Expose port: $PORT"
echo "QT Creator: $QTC"
echo "Image: $IMAGE"
if [ ! -e "$HOME"/.odm-dev-home ]; then
mkdir -p "$HOME"/.odm-dev-home
@ -100,5 +110,5 @@ USER_ID=$(id -u)
GROUP_ID=$(id -g)
USER=$(id -un)
xhost +
docker run -ti --entrypoint bash --name odmdev -v $(pwd):/code -v "$DATA":/datasets -p $PORT:3000 --privileged -e DISPLAY -e LANG=C.UTF-8 -e LC_ALL=C.UTF-8 -v="/tmp/.X11-unix:/tmp/.X11-unix:rw" -v="$HOME/.odm-dev-home:/home/$USER" opendronemap/nodeodm -c "/code/start-dev-env.sh --setup $USER $USER_ID $GROUP_ID $QTC"
exit 0
docker run -ti --entrypoint bash --name odmdev -v $(pwd):/code -v "$DATA":/datasets -p $PORT:3000 --privileged -e DISPLAY -e LANG=C.UTF-8 -e LC_ALL=C.UTF-8 -v="/tmp/.X11-unix:/tmp/.X11-unix:rw" -v="$HOME/.odm-dev-home:/home/$USER" $IMAGE -c "/code/start-dev-env.sh --setup $USER $USER_ID $GROUP_ID $QTC"
exit 0

Wyświetl plik

@ -1,5 +1,5 @@
if [ ! -z "$1" ]; then
python -m unittest discover tests "test_$1.py"
python3 -m unittest discover tests "test_$1.py"
else
python -m unittest discover tests "test_*.py"
python3 -m unittest discover tests "test_*.py"
fi

Wyświetl plik

@ -15,7 +15,7 @@ class TestCamera(unittest.TestCase):
def test_camera(self):
c = camera.get_cameras_from_opensfm("tests/assets/reconstruction.json")
self.assertEqual(len(c.keys()), 1)
camera_id = c.keys()[0]
camera_id = list(c.keys())[0]
self.assertTrue('v2 ' not in camera_id)
self.assertRaises(RuntimeError, camera.get_cameras_from_opensfm, 'tests/assets/nonexistant.json')
@ -27,7 +27,7 @@ class TestCamera(unittest.TestCase):
osfm_c = camera.get_opensfm_camera_models(c)
self.assertEqual(len(osfm_c.keys()), 1)
c1 = osfm_c[osfm_c.keys()[0]]
c1 = osfm_c[list(osfm_c.keys())[0]]
self.assertTrue('k1_prior' in c1)
self.assertTrue('k2_prior' in c1)
self.assertFalse('test' in c1)

Wyświetl plik

@ -33,8 +33,8 @@ class TestGcp(unittest.TestCase):
copy = GCPFile(gcp.create_utm_copy("tests/assets/output/gcp_utm.txt"))
self.assertTrue(copy.exists())
self.assertEqual(copy.raw_srs, "WGS84 UTM 16N")
self.assertEqual(copy.get_entry(0).x, 609865.707705)
self.assertEqual(copy.get_entry(0).y, 4950688.36182)
self.assertEqual(copy.get_entry(0).x, 609865.7077054137)
self.assertEqual(copy.get_entry(0).y, 4950688.361817497)
def test_utm_conversion_feet(self):
gcp = GCPFile("tests/assets/gcp_michigan_feet_valid.txt")
@ -43,7 +43,7 @@ class TestGcp(unittest.TestCase):
self.assertEqual(copy.raw_srs, "WGS84 UTM 16N")
self.assertEqual(round(copy.get_entry(0).x, 3), 609925.818)
self.assertEqual(round(copy.get_entry(0).y, 3), 4950688.772)
self.assertEqual(round(copy.get_entry(0).z, 3), 171.663)
self.assertEqual(round(copy.get_entry(0).z, 3), 563.199)
def test_filtered_copy(self):
gcp = GCPFile('tests/assets/gcp_latlon_valid.txt')