Update code to nearly comply with PEP8

Former-commit-id: e966c411dc
gh-pages
Dakota Benjamin 2015-08-27 08:59:35 -04:00
parent a7b52c9a8a
commit 89d553c541
1 changed file with 388 additions and 327 deletions

run.py 100644 → 100755

@@ -1,31 +1,51 @@
#!/usr/bin/python
import os, sys, multiprocessing, json, datetime, re, subprocess, shutil, shlex, collections, fractions, argparse
import os
import sys
import multiprocessing
import json
import datetime
import re
import subprocess
import shutil
import shlex
# import collections # Never used
import fractions
import argparse
import knnMatch_exif
## the defs
# the defs
CURRENT_DIR = os.getcwd()
BIN_PATH_ABS = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
CORES = multiprocessing.cpu_count()
def getCcdWidths():
with open(BIN_PATH_ABS + "/ccd_defs.json") as jsonFile:
def get_ccd_widths():
"""Return the CCD Width of the camera listed in the JSON defs file."""
with open(BIN_PATH_ABS + '/ccd_defs.json') as jsonFile:
return json.load(jsonFile)
objects = []
ccdWidths = getCcdWidths()
ccdWidths = get_ccd_widths()
BIN_PATH = BIN_PATH_ABS + "/bin"
BIN_PATH = BIN_PATH_ABS + '/bin'
objectStats = {
'count': 0, 'good': 0, 'bad': 0, 'minWidth': 0, 'minHeight': 0,
'maxWidth': 0, 'maxHeight': 0
}
objectStats = {'count': 0, "good": 0, "bad": 0, "minWidth": 0, "minHeight": 0, "maxWidth": 0, "maxHeight": 0}
jobOptions = {'resizeTo': 0, 'srcDir': CURRENT_DIR, 'utmZone': -999, 'utmSouth': False, 'utmEastOffset': 0, 'utmNorthOffset': 0}
jobOptions = {'resizeTo': 0, 'srcDir': CURRENT_DIR, 'utmZone': -999,
'utmSouth': False, 'utmEastOffset': 0, 'utmNorthOffset': 0}
# parse arguments
processopts = ['resize', 'getKeypoints', 'match', 'bundler', 'cmvs', 'pmvs',
'odm_meshing', 'odm_texturing', 'odm_georeferencing',
'odm_orthophoto']
parser = argparse.ArgumentParser(description='OpenDroneMap')
parser.add_argument('--resize-to', '-r', #currently doesn't support 'orig'
parser.add_argument('--resize-to', # currently doesn't support 'orig'
metavar='<integer>',
default=2400,
type=int,
@@ -34,37 +54,25 @@ parser.add_argument('--resize-to', '-r', #currently doesn't support 'orig'
parser.add_argument('--start-with', '-s',
metavar='<string>',
default='resize',
choices = ['resize', 'getKeypoints', 'match', 'bundler',
'cmvs', 'pmvs', 'odm_meshing', 'odm_texturing',
'odm_georeferencing', 'odm_orthophoto'],
help = 'can be one of: resize getKeypoints match bundler \
cmvs pmvs odm_meshing odm_texturing odm_georeferencing \
odm_orthophoto')
choices=processopts,
help=('Can be one of: ' + ' | '.join(processopts)))
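The three stage arguments now share a single processopts list for both choices and the help text instead of repeating the stage names. A minimal standalone sketch of the same argparse pattern, using an illustrative subset of the stage names:
import argparse

stages = ['resize', 'getKeypoints', 'match']  # illustrative subset of processopts

parser = argparse.ArgumentParser(description='demo of the shared-choices pattern')
parser.add_argument('--start-with', '-s',
                    metavar='<string>',
                    default='resize',
                    choices=stages,
                    help='Can be one of: ' + ' | '.join(stages))

args = parser.parse_args(['--start-with', 'match'])
print(args.start_with)  # prints: match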
parser.add_argument('--end-with', '-e',
metavar='<string>',
default='odm_orthophoto',
choices = ['resize', 'getKeypoints', 'match', 'bundler',
'cmvs', 'pmvs', 'odm_meshing', 'odm_texturing',
'odm_georeferencing', 'odm_orthophoto'],
help = 'can be one of: resize getKeypoints match bundler \
cmvs pmvs odm_meshing odm_texturing odm_georeferencing \
odm_orthophoto')
choices=processopts,
help=('Can be one of:' + ' | '.join(processopts)))
parser.add_argument('--run-only',
metavar='<string>',
choices = ['resize', 'getKeypoints', 'match', 'bundler',
'cmvs', 'pmvs', 'odm_meshing', 'odm_texturing',
'odm_georeferencing', 'odm_orthophoto'],
help = 'can be one of: resize getKeypoints match bundler \
cmvs pmvs odm_meshing odm_texturing odm_georeferencing \
odm_orthophoto')
choices=processopts,
help=('Can be one of:' + ' | '.join(processopts)))
parser.add_argument('--force-focal',
metavar='<positive float>',
type=float,
help = 'Override the focal length information \
for the images')
help=('Override the focal length information for the '
'images'))
parser.add_argument('--force-ccd',
metavar='<positive float>',
@@ -75,35 +83,37 @@ parser.add_argument('--matcher-threshold',
metavar='<percent>',
default=2.0,
type=float,
help = 'Ignore matched keypoints if the two images share \
less than <float> percent of keypoints')
help=('Ignore matched keypoints if the two images share '
'less than <float> percent of keypoints'))
parser.add_argument('--matcher-ratio',
metavar='<float>',
default=0.6,
type=float,
help = 'Ratio of the distance to the next best matched \
keypoint')
help=('Ratio of the distance to the next best matched '
'keypoint'))
parser.add_argument('--matcher-preselect',
type=bool,
metavar='',
default=True,
help = 'use GPS exif data, if available, to match each \
image only with its k-nearest neighbors, or all images \
within a certain distance threshold')
help=('use GPS exif data, if available, to match each '
'image only with its k-nearest neighbors, or all '
'images within a certain distance threshold'))
parser.add_argument('--matcher-useKnn',
type=bool,
metavar='',
default=True,
help = 'use GPS exif data, if available, to match each \
image only with its k-nearest neighbors, or all images \
within a certain distance threshold')
help=('use GPS exif data, if available, to match each '
'image only with its k-nearest neighbors, or all '
'images within a certain distance threshold'))
parser.add_argument('--matcher-kDistance',
metavar='<integer>',
default=20,
type=int,
help = 'The maximum number of images per cluster')
help='')
parser.add_argument('--cmvs-maxImages',
metavar='<integer>',
@@ -115,10 +125,10 @@ parser.add_argument('--pmvs-level',
metavar='<positive integer>',
default=1,
type=int,
help = 'The level in the image pyramid that is used \
for the computation. see \
http://www.di.ens.fr/pmvs/documentation.html for \
more pmvs documentation')
help=('The level in the image pyramid that is used '
'for the computation. see '
'http://www.di.ens.fr/pmvs/documentation.html for '
'more pmvs documentation'))
parser.add_argument('--pmvs-csize',
metavar='< positive integer>',
@@ -130,28 +140,28 @@ parser.add_argument('--pmvs-threshold',
metavar='<float: -1.0 <= x <= 1.0>',
default=0.7,
type=float,
help = 'A patch reconstruction is accepted as a success \
and kept, if its associated photometric consistency \
measure is above this threshold.')
help=('A patch reconstruction is accepted as a success '
'and kept, if its associated photometric consistency '
'measure is above this threshold.'))
parser.add_argument('--pmvs-wsize',
metavar='<positive integer>',
default=7,
type=int,
help = 'pmvs samples wsize x wsize pixel colors from \
each image to compute photometric consistency score. \
For example, when wsize=7, 7x7=49 pixel colors are \
sampled in each image. Increasing the value leads to \
more stable reconstructions, but the program becomes \
slower.')
help=('pmvs samples wsize x wsize pixel colors from '
'each image to compute photometric consistency '
'score. For example, when wsize=7, 7x7=49 pixel '
'colors are sampled in each image. Increasing the '
'value leads to more stable reconstructions, but '
'the program becomes slower.'))
parser.add_argument('--pmvs-minImageNum',
metavar='<positive integer>',
default=3,
type=int,
help = 'Each 3D point must be visible in at least \
minImageNum images for being reconstructed. 3 is \
suggested in general.')
help=('Each 3D point must be visible in at least '
'minImageNum images for being reconstructed. 3 is '
'suggested in general.'))
parser.add_argument('--odm_meshing-maxVertexCount',
metavar='<positive integer>',
@@ -163,47 +173,55 @@ parser.add_argument('--odm_meshing-octreeDepth',
metavar='<positive integer>',
default=9,
type=int,
help = 'Oct-tree depth used in the mesh reconstruction, \
increase to get more vertices, recommended values are \
8-12')
help=('Oct-tree depth used in the mesh reconstruction, '
'increase to get more vertices, recommended '
'values are 8-12'))
parser.add_argument('--odm_meshing-samplesPerNode',
metavar='<float >= 1.0>',
default=1,
type=float,
help = 'Number of points per octree node, recommended \
value: 1.0')
help=('Number of points per octree node, recommended '
'value: 1.0'))
parser.add_argument('--odm_meshing-solverDivide',
metavar='<positive integer>',
default=9,
type=int,
help = 'Oct-tree depth at which the Laplacian equation \
is solved in the surface reconstruction step. \
Increasing this value increases computation times \
slightly but helps reduce memory usage.')
help=('Oct-tree depth at which the Laplacian equation '
'is solved in the surface reconstruction step. '
'Increasing this value increases computation '
'times slightly but helps reduce memory usage.'))
parser.add_argument('--odm_texturing-textureResolution',
metavar='<positive integer>',
default=4096,
type=int,
help = 'The resolution of the output textures. Must be \
greater than textureWithSize.')
help=('The resolution of the output textures. Must be '
'greater than textureWithSize.'))
parser.add_argument('--odm_texturing-textureWithSize',
metavar='<positive integer>',
default=3600,
type=int,
help = 'The resolution to rescale the images performing \
the texturing.')
help=('The resolution to rescale the images performing '
'the texturing.'))
parser.add_argument('--odm_georeferencing-useGcp',
metavar='<True | False>',
default=True,
type=bool,
help=('Skip reading the GCP file and use EXIF data to '
'extract geographical coordinates for generating '
'an orthophoto.'))
parser.add_argument('--odm_georeferencing-gcpFile',
metavar='<path string>',
default='gcp_list.txt',
help = 'path to the file containing the ground control \
points used for georeferencing. The file needs to be on \
the following line format: \
\neasting northing height pixelrow pixelcol imagename')
help=('path to the file containing the ground control '
'points used for georeferencing. The file needs to '
'be on the following line format: \neasting '
'northing height pixelrow pixelcol imagename'))
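The gcpFile help above describes one ground control point per line as whitespace-separated fields: easting, northing, height, pixel row, pixel column and image name. A small parsing sketch on a made-up line (real gcp_list.txt values will differ):
line = '544256.7 5320919.9 5.0 3044 2622 IMG_0525.JPG'  # hypothetical GCP line

easting, northing, height, pixelrow, pixelcol, imagename = line.split()
easting, northing, height = float(easting), float(northing), float(height)
pixelrow, pixelcol = int(pixelrow), int(pixelcol)
print('%s -> E %.1f, N %.1f' % (imagename, easting, northing))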
parser.add_argument('--zip-results',
action='store_true',
@@ -222,51 +240,65 @@ print vars(args)
def run(cmd):
"""Run a system command"""
returnCode = os.system(cmd)
if (returnCode != 0):
sys.exit("\nquitting cause: \n\t" + cmd + "\nreturned with code " + str(returnCode) + ".\n")
sys.exit("\nquitting cause: \n\t" + cmd + "\nreturned with code " +
str(returnCode) + ".\n")
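One caveat with run(): on Unix, os.system() returns the raw wait status rather than the child's exit code, so the value printed in the error message is typically the exit code shifted left by eight bits. A small illustration (Unix-only; behaviour differs on other platforms):
import os

status = os.system('exit 3')        # run through the shell
print(status)                       # usually 768 on Unix, i.e. 3 << 8
print(os.WEXITSTATUS(status))       # 3, the actual exit code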
def now():
"""Return the current time"""
return datetime.datetime.now().strftime('%a %b %d %H:%M:%S %Z %Y')
def runAndReturn(cmdSrc, cmdDest):
def run_and_return(cmdSrc, cmdDest):
"""Run a system command and return the output"""
srcProcess = subprocess.Popen(shlex.split(cmdSrc), stdout=subprocess.PIPE)
if cmdDest:
destProcess = subprocess.Popen(shlex.split(cmdDest), stdin=srcProcess.stdout, stdout=subprocess.PIPE)
destProcess = subprocess.Popen(shlex.split(cmdDest),
stdin=srcProcess.stdout,
stdout=subprocess.PIPE)
stdout, stderr = destProcess.communicate()
else:
stdout, stderr = srcProcess.communicate()
return stdout.decode('ascii')
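run_and_return() chains two commands with subprocess.Popen pipes. The same pattern in a self-contained sketch, piping ls -1 into grep (Unix commands assumed to be on PATH); closing the first process's stdout in the parent is the usual extra step so the producer sees a SIGPIPE if the consumer exits early:
import shlex
import subprocess

src = subprocess.Popen(shlex.split('ls -1'), stdout=subprocess.PIPE)
dest = subprocess.Popen(shlex.split('grep py'),
                        stdin=src.stdout,
                        stdout=subprocess.PIPE)
src.stdout.close()                  # let src get SIGPIPE if dest exits first
stdout, stderr = dest.communicate()
print(stdout.decode('ascii'))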
def calculateEpsg(utmZone, south):
def calculate_EPSG(utmZone, south):
"""Calculate and return the EPSG"""
if south:
return 32700 + utmZone
else:
return 32600 + utmZone
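The renamed calculate_EPSG() encodes the standard WGS84/UTM convention: EPSG 326xx for northern-hemisphere zones and 327xx for southern ones, with xx the zone number. For example:
print(calculate_EPSG(17, False))    # 32617, WGS84 / UTM zone 17N
print(calculate_EPSG(56, True))     # 32756, WGS84 / UTM zone 56S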
def parseCoordinateSystem():
if os.path.isfile(jobOptions["jobDir"] + "/odm_georeferencing/coordFile.txt"):
with open(jobOptions["jobDir"] + "/odm_georeferencing/coordFile.txt") as f:
def parse_coordinate_system():
"""Write attributes to jobOptions from coord file"""
if os.path.isfile(jobOptions['jobDir'] +
'/odm_georeferencing/coordFile.txt'):
with open(jobOptions['jobDir'] + '/odm_georeferencing/coordFile.txt') as f:
for lineNumber, line in enumerate(f):
if lineNumber == 0:
tokens = line.split(' ')
if len(tokens) == 3:
utmZoneString = tokens[2][0:len(tokens[2])-2].strip()
utmSouthBool = (tokens[2][len(tokens[2])-2].strip() == 'S')
jobOptions["csString"] = "+datum=WGS84 +proj=utm +zone=" + utmZoneString + (" +south" if utmSouthBool else "")
jobOptions["epsg"] = calculateEpsg(int(utmZoneString), utmSouthBool)
jobOptions['csString'] = '+datum=WGS84 +proj=utm +zone=' + utmZoneString + (' +south' if utmSouthBool else '')
jobOptions['epsg'] = calculate_EPSG(int(utmZoneString), utmSouthBool)
elif lineNumber == 1:
tokens = line.split(' ')
if len(tokens) == 2:
jobOptions["utmEastOffset"] = int(tokens[0].strip())
jobOptions["utmNorthOffset"] = int(tokens[1].strip())
jobOptions['utmEastOffset'] = int(tokens[0].strip())
jobOptions['utmNorthOffset'] = int(tokens[1].strip())
else:
break
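parse_coordinate_system() expects coordFile.txt to hold a header line whose third token is the UTM zone plus hemisphere letter (for example 17N), followed by a line with the east and north offsets. A hedged sketch of the same parsing against made-up file contents (the exact header wording is an assumption here):
coord_lines = ['WGS84 UTM 17N\n', '609600 4793000\n']   # hypothetical contents

zone_token = coord_lines[0].split(' ')[2]               # '17N\n'
utm_zone = zone_token[0:len(zone_token) - 2].strip()    # '17'
utm_south = (zone_token[len(zone_token) - 2] == 'S')    # False
east_offset, north_offset = [int(t) for t in coord_lines[1].split(' ')]
print('zone %s south=%s offsets=%d,%d' % (utm_zone, utm_south, east_offset, north_offset))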
def prepareObjects():
## get the source list
source_files = runAndReturn('ls -1', 'egrep "\.[jJ]{1}[pP]{1}[eE]{0,1}[gG]{1}"')
def prepare_objects():
"""Prepare the jobOptions and fileObjects dicts"""
source_files = run_and_return('ls -1', 'egrep "\.[jJ]{1}[pP]{1}[eE]{0,1}[gG]{1}"')
print "\n - source files - " + now()
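The image listing still shells out to ls piped into egrep. Purely as an illustration (not part of this commit), the same JPEG filter can be done in Python alone; the pattern below is anchored to the file extension, so it is slightly stricter than the egrep above:
import os
import re

jpeg_re = re.compile(r'\.[jJ][pP][eE]?[gG]$')
source_files = [f for f in sorted(os.listdir('.')) if jpeg_re.search(f)]
print('\n'.join(source_files))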
@@ -274,11 +306,11 @@ def prepareObjects():
filename = filename.rstrip('\n')
if not filename:
continue
file_make = runAndReturn('jhead "' + filename + '"', 'grep "Camera make"')
file_model = runAndReturn('jhead "' + filename + '"', 'grep "Camera model"')
file_focal = runAndReturn('jhead "' + filename + '"', 'grep "Focal length"')
file_ccd = runAndReturn('jhead "' + filename + '"', 'grep "CCD width"')
file_resolution = runAndReturn('jhead "' + filename + '"', 'grep "Resolution"')
file_make = run_and_return('jhead "' + filename + '"', 'grep "Camera make"')
file_model = run_and_return('jhead "' + filename + '"', 'grep "Camera model"')
file_focal = run_and_return('jhead "' + filename + '"', 'grep "Focal length"')
file_ccd = run_and_return('jhead "' + filename + '"', 'grep "CCD width"')
file_resolution = run_and_return('jhead "' + filename + '"', 'grep "Resolution"')
fileObject = {}
@@ -311,19 +343,19 @@ def prepareObjects():
fileObject["width"] = int(match.group(1).strip())
fileObject["height"] = int(match.group(2).strip())
if not '--force-focal' in args:
if '--force-focal' not in args:
match = re.search(":[\ ]*([0-9\.]*)mm", file_focal)
if match:
fileObject["focal"] = float((match.group()[1:-2]).strip())
else:
fileObject["focal"] = args.force_focal
if not '--force-ccd' in args:
if '--force-ccd' not in args:
match = re.search(":[\ ]*([0-9\.]*)mm", file_ccd)
if match:
fileObject["ccd"] = float(match.group()[1:-2].strip())
if (not "ccd" in fileObject) and ("id" in fileObject):
if ("ccd" not in fileObject) and ("id" in fileObject):
fileObject["ccd"] = float(ccdWidths[fileObject["id"]])
else:
fileObject["ccd"] = args.force_ccd
@@ -377,8 +409,7 @@ def prepareObjects():
objects.append(fileObject)
if not "good" in objectStats:
if "good" not in objectStats:
print "\n found no usable images - quitting\n"
sys.exit()
else:
@@ -416,7 +447,9 @@ def prepareObjects():
fileObject["step_1_keyFile"] = jobOptions["jobDir"] + "/" + fileObject["base"] + ".key"
fileObject["step_1_gzFile"] = jobOptions["jobDir"] + "/" + fileObject["base"] + ".key.gz"
def resize():
"""Resize images"""
print "\n - preparing images - " + now()
os.chdir(jobOptions["jobDir"])
@@ -433,7 +466,7 @@ def resize():
else:
print " using existing " + fileObject["src"] + " \tto " + fileObject["step_0_resizedImage"]
file_resolution = runAndReturn('jhead "' + fileObject["step_0_resizedImage"] + '"', 'grep "Resolution"')
file_resolution = run_and_return('jhead "' + fileObject["step_0_resizedImage"] + '"', 'grep "Resolution"')
match = re.search(": ([0-9]*) x ([0-9]*)", file_resolution)
if match:
fileObject["width"] = int(match.group(1).strip())
@@ -443,7 +476,9 @@ def resize():
if args.end_with != "resize":
getKeypoints()
def getKeypoints():
"""Run vlsift to create keypoint files for each image"""
print "\n - finding keypoints - " + now()
os.chdir(jobOptions["jobDir"])
@@ -480,7 +515,9 @@ def getKeypoints():
if args.end_with != "getKeypoints":
match()
def match():
"""Run matches on images"""
print "\n - matching keypoints - " + now()
os.chdir(jobOptions["jobDir"])
@@ -491,7 +528,7 @@ def match():
matchesJobs = ""
c = 0
t = (objectStats["good"] - 1) * (objectStats["good"] / 2)
t = (objectStats["good"] - 1) * (objectStats["good"] / 2) # BUG:unused
preselected_pairs = []
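The t computed above is meant to be the number of unordered image pairs, n*(n-1)/2 for n good images; note that with Python 2 integer division the written form truncates for odd n, which is harmless only because t is never used (as the BUG comment notes). A quick check of the arithmetic:
import itertools

n = 5                                                    # number of "good" images
print(n * (n - 1) // 2)                                  # 10 unordered pairs
print(len(list(itertools.combinations(range(n), 2))))    # 10, same count
print((n - 1) * (n // 2))                                # 8: the truncating form above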
@@ -508,7 +545,7 @@ def match():
if args.matcher_preselect:
useKnn = True
if args.matcher_useKnn:
useKnn = False
useKnn = False # BUG: never used
preselected_pairs = knnMatch_exif.preselect_pairs(BIN_PATH + "/odm_extract_utm", jobOptions["step_2_filelist"], args.matcher_kDistance, args.matcher_useKnn)
if len(preselected_pairs) != 0:
for i, j, in preselected_pairs:
@@ -556,7 +593,9 @@ def match():
if args.end_with != "match":
bundler()
def bundler():
"""Run bundler and prepare bundle for PMVS"""
print "\n - running bundler - " + now()
os.chdir(jobOptions["jobDir"])
@@ -626,7 +665,9 @@ def bundler():
if args.end_with != "bundler":
cmvs()
def cmvs():
"""Run CMVS"""
print "\n - running cmvs - " + now()
os.chdir(jobOptions["jobDir"])
@@ -637,7 +678,9 @@ def cmvs():
if args.end_with != "cmvs":
pmvs()
def pmvs():
"""Run PMVS"""
print "\n - running pmvs - " + now()
os.chdir(jobOptions["jobDir"])
@@ -649,7 +692,9 @@ def pmvs():
if args.end_with != "pmvs":
odm_meshing()
def odm_meshing():
"""Run odm_meshing"""
print "\n - running meshing - " + now()
os.chdir(jobOptions["jobDir"])
@@ -663,7 +708,9 @@ def odm_meshing():
if args.end_with != "odm_meshing":
odm_texturing()
def odm_texturing():
"""Run odm_texturing"""
print "\n - running texturing - " + now()
os.chdir(jobOptions["jobDir"])
@@ -678,7 +725,9 @@ def odm_texturing():
if args.end_with != "odm_texturing":
odm_georeferencing()
def odm_georeferencing():
"""Run odm_georeferencing"""
print "\n - running georeferencing - " + now()
os.chdir(jobOptions["jobDir"])
@@ -687,7 +736,7 @@ def odm_georeferencing():
except:
pass
if args.odm_georeferencing_useGcp == False:
if not args.odm_georeferencing_useGcp:
run("\"" + BIN_PATH + "/odm_extract_utm\" -imagesPath " + jobOptions["srcDir"] + "/ -imageListFile " + jobOptions["jobDir"] + "/pmvs/list.rd.txt -outputCoordFile " + jobOptions["jobDir"] + "/odm_georeferencing/coordFile.txt")
run("\"" + BIN_PATH + "/odm_georef\" -bundleFile " + jobOptions["jobDir"] + "/pmvs/bundle.rd.out -inputCoordFile " + jobOptions["jobDir"] + "/odm_georeferencing/coordFile.txt -inputFile " + jobOptions["jobDir"] + "-results/odm_texturing/odm_textured_model.obj -outputFile " + jobOptions["jobDir"] + "-results/odm_texturing/odm_textured_model_geo.obj -inputPointCloudFile " + jobOptions["jobDir"] + "-results/option-0000.ply -outputPointCloudFile " + jobOptions["jobDir"] + "-results/option-0000_georef.ply -logFile " + jobOptions["jobDir"] + "/odm_georeferencing/odm_georeferencing_log.txt -georefFileOutputPath " + jobOptions["jobDir"] + "-results/odm_texturing/odm_textured_model_geo_georef_system.txt")
elif os.path.isfile(jobOptions["srcDir"] + "/" + args.odm_georeferencing_gcpFile):
@@ -697,8 +746,8 @@ def odm_georeferencing():
print "Skipping orthophoto"
args.end_with = "odm_georeferencing"
if not "csString" in jobOptions:
parseCoordinateSystem()
if "csString" not in jobOptions:
parse_coordinate_system()
if "csString" in jobOptions and "utmEastOffset" in jobOptions and "utmNorthOffset" in jobOptions:
images = []
@@ -754,7 +803,7 @@ def odm_georeferencing():
exivCmd += " -M\"set Exif.GPSInfo.GPSLongitude " + lonDeg + "/1 " + lonMin + "/1 " + lonSecNumerator + "/" + lonSecDenominator + "\""
exivCmd += " -M\"set Exif.GPSInfo.GPSLongitudeRef " + lonRef + "\""
altNumerator = arcDenominator = 0
altNumerator = arcDenominator = 0 # BUG: arcDenominator is never used
if altString:
altFrac = fractions.Fraction(altString)
altNumerator = str(altFrac._numerator)
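The exiv2 -M commands built here write EXIF GPS tags, which store rational values, so the decimal altitude string is converted with fractions.Fraction. A small illustration; note that numerator and denominator are the public attribute names, while _numerator used above is an implementation detail:
import fractions

alt = fractions.Fraction('123.4')                    # from a hypothetical altitude string
print('%d/%d' % (alt.numerator, alt.denominator))    # 617/5, the rational form exiv2 expects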
@@ -779,6 +828,7 @@
def odm_orthophoto():
"""Run odm_orthophoto"""
print "\n - running orthophoto generation - " + now()
os.chdir(jobOptions["jobDir"])
@@ -789,32 +839,42 @@ def odm_orthophoto():
run("\"" + BIN_PATH + "/odm_orthophoto\" -inputFile " + jobOptions["jobDir"] + "-results/odm_texturing/odm_textured_model_geo.obj -logFile " + jobOptions["jobDir"] + "/odm_orthophoto/odm_orthophoto_log.txt -outputFile " + jobOptions["jobDir"] + "-results/odm_orthphoto.png -resolution 20.0 -outputCornerFile " + jobOptions["jobDir"] + "/odm_orthphoto_corners.txt")
if not "csString" in jobOptions:
parseCoordinateSystem()
if "csString" not in jobOptions:
parse_coordinate_system()
geoTiffCreated = False
if "csString" in jobOptions and "utmEastOffset" in jobOptions and "utmNorthOffset" in jobOptions:
if ("csString" in jobOptions and
"utmEastOffset" in jobOptions and "utmNorthOffset" in jobOptions):
ulx = uly = lrx = lry = 0.0
with open(jobOptions["jobDir"] + "/odm_orthphoto_corners.txt") as f:
with open(jobOptions["jobDir"] +
"/odm_orthphoto_corners.txt") as f:
for lineNumber, line in enumerate(f):
if lineNumber == 0:
tokens = line.split(' ')
if len(tokens) == 4:
ulx = float(tokens[0]) + float(jobOptions["utmEastOffset"])
lry = float(tokens[1]) + float(jobOptions["utmNorthOffset"])
lrx = float(tokens[2]) + float(jobOptions["utmEastOffset"])
uly = float(tokens[3]) + float(jobOptions["utmNorthOffset"])
ulx = float(tokens[0]) + \
float(jobOptions["utmEastOffset"])
lry = float(tokens[1]) + \
float(jobOptions["utmNorthOffset"])
lrx = float(tokens[2]) + \
float(jobOptions["utmEastOffset"])
uly = float(tokens[3]) + \
float(jobOptions["utmNorthOffset"])
print(" Creating GeoTIFF...")
sys.stdout.write(" ")
run("gdal_translate -a_ullr " + str(ulx) + " " + str(uly) + " " + str(lrx) + " " + str(lry) + " -a_srs \"" + jobOptions["csString"] + "\" " + jobOptions["jobDir"] + "-results/odm_orthphoto.png " + jobOptions["jobDir"] + "-results/odm_orthphoto.tif")
run("gdal_translate -a_ullr " + str(ulx) + " " + str(uly) + " " +
str(lrx) + " " + str(lry) + " -a_srs \"" + jobOptions["csString"] +
"\" " + jobOptions["jobDir"] + "-results/odm_orthphoto.png " +
jobOptions["jobDir"] + "-results/odm_orthphoto.tif")
geoTiffCreated = True
if not geoTiffCreated:
print " Warning: No geo-referenced orthophoto created due to missing geo-referencing or corner coordinates."
print(" Warning: No geo-referenced orthophoto created due to "
"missing geo-referencing or corner coordinates.")
#parseArgs()
prepareObjects()
if __name__ == '__main__':
prepare_objects()
os.chdir(jobOptions["jobDir"])
@@ -841,6 +901,7 @@ elif args.start_with == "odm_orthophoto":
if args.zip_results:
print "\nCompressing results - " + now()
run("cd " + jobOptions["jobDir"] + "-results/ && tar -czf " + jobOptions["jobDir"] + "-results.tar.gz *")
run("cd " + jobOptions["jobDir"] + "-results/ && tar -czf " +
jobOptions["jobDir"] + "-results.tar.gz *")
print "\n - done - " + now()