Mirror of https://github.com/OpenDroneMap/ODM
Compare commits
281 commits
Author | SHA1 | Date |
---|---|---|
Piero Toffanin | ae6726e536 | |
Piero Toffanin | 6da366f806 | |
Piero Toffanin | e4e27c21f2 | |
Piero Toffanin | f9136f7a0d | |
idimitrovski | a2d9eccad5 | |
Piero Toffanin | 424d9e28a0 | |
Andrew Harvey | a0fbd71d41 | |
Piero Toffanin | 6084d1dca0 | |
Piero Toffanin | aef4182cf9 | |
Piero Toffanin | 6c0fe6e79d | |
Piero Toffanin | 17dfc7599a | |
Piero Toffanin | a70e7445ad | |
Piero Toffanin | 981bf88b48 | |
Piero Toffanin | ad63392e1a | |
Piero Toffanin | 77f8ffc8cd | |
Piero Toffanin | 4d7cf32a8c | |
Stephen Mather | 5a439c0ab6 | |
Piero Toffanin | ffcda0dc57 | |
Stephen Mather | 2c6fd1dd9f | |
Sylvain POULAIN | cb3229a3d4 | |
Piero Toffanin | fc9c94880f | |
kielnino | b204a2eb98 | |
Piero Toffanin | d9f77bea54 | |
kielnino | 10947ecddf | |
kielnino | f7c7044823 | |
Piero Toffanin | ae50133886 | |
Piero Toffanin | 9fd3bf3edd | |
Piero Toffanin | fb85b754fb | |
Piero Toffanin | 30f89c068c | |
Piero Toffanin | 260b4ef864 | |
Piero Toffanin | fb5d88366e | |
Piero Toffanin | f793627402 | |
Piero Toffanin | 9183218f1b | |
Piero Toffanin | 1283df206e | |
Piero Toffanin | 76a061b86a | |
Piero Toffanin | 32d933027e | |
Piero Toffanin | a29280157e | |
Piero Toffanin | 704c285b8f | |
Piero Toffanin | 5674e68e9f | |
Piero Toffanin | d419d9f038 | |
Piero Toffanin | b3ae35f5e5 | |
Piero Toffanin | 18d4d31be7 | |
Piero Toffanin | 16ccd277ec | |
Piero Toffanin | 7048868f28 | |
Piero Toffanin | b14ffd919a | |
Piero Toffanin | 4d1d0350a5 | |
Piero Toffanin | 7261c29efc | |
Piero Toffanin | 2ccad6ee9d | |
Piero Toffanin | 6acf9835e5 | |
Piero Toffanin | 5b5df3aaf7 | |
Piero Toffanin | 26cc9fbf93 | |
Piero Toffanin | b08f955963 | |
Piero Toffanin | d028873f63 | |
Piero Toffanin | 2d2b809530 | |
Piero Toffanin | 7e05a5b04e | |
Piero Toffanin | e0ab6ae7ed | |
Piero Toffanin | eceae8d2e4 | |
Piero Toffanin | 55570385c1 | |
Piero Toffanin | eed840c9bb | |
Piero Toffanin | 8376f24f08 | |
Piero Toffanin | 6d70a4f0be | |
Piero Toffanin | 6df5e0b711 | |
Piero Toffanin | 5d9564fda3 | |
Piero Toffanin | eccb203d7a | |
Piero Toffanin | 2df4afaecf | |
Piero Toffanin | e5ed68846e | |
Piero Toffanin | 7cf71628f3 | |
Piero Toffanin | 237bf8fb87 | |
Piero Toffanin | a542e7b78d | |
Piero Toffanin | 52fa5d12e6 | |
Piero Toffanin | e3296f0379 | |
Piero Toffanin | a06f6f19b2 | |
Piero Toffanin | 2d94934595 | |
Piero Toffanin | 08d03905e6 | |
Merten Fermont | f70e55c9eb | |
Merten Fermont | a89803c2eb | |
Piero Toffanin | de7595aeef | |
Piero Toffanin | aa0e9f68df | |
Piero Toffanin | 7ca122dbf6 | |
Piero Toffanin | 0d303aab16 | |
Piero Toffanin | 6dc0c98fa0 | |
Merten Fermont | c679d400c8 | |
Piero Toffanin | 38af615657 | |
Piero Toffanin | fc8dd7c5c5 | |
Piero Toffanin | 6eca279c4b | |
Piero Toffanin | 681ee18925 | |
Piero Toffanin | f9a3c5eb0e | |
Piero Toffanin | a56b52d0df | |
Piero Toffanin | f6be28db2a | |
Piero Toffanin | 5988be1f57 | |
Piero Toffanin | d9600741d1 | |
Piero Toffanin | 57c61d918d | |
Piero Toffanin | 7277eabd0b | |
Piero Toffanin | d78b8ff399 | |
Piero Toffanin | d10bef2631 | |
Piero Toffanin | 2930927207 | |
Piero Toffanin | 83fef16cb1 | |
Piero Toffanin | 2fea4d9f3d | |
Piero Toffanin | 50162147ce | |
Piero Toffanin | 07b641dc09 | |
Piero Toffanin | d2cd5d9336 | |
Piero Toffanin | 340e32af8f | |
Piero Toffanin | 8276751d07 | |
Piero Toffanin | ebba01aad5 | |
Piero Toffanin | f4549846de | |
Piero Toffanin | f5604a05a8 | |
Piero Toffanin | 3fc46a1e04 | |
Piero Toffanin | 4b8cf9af3d | |
Piero Toffanin | e9e18050a2 | |
Piero Toffanin | 9d15982850 | |
mdchia | 820ea4a4e3 | |
Saijin-Naib | e84c77dd56 | |
Stephen Mather | d929d7b8fa | |
Piero Toffanin | b948109e8f | |
Sebastien | c3593c0f69 | |
Sebastien | 5a20a22a1a | |
Adrien-ANTON-LUDWIG | b4aa3a9be0 | |
Adrien-ANTON-LUDWIG | 65c20796be | |
Piero Toffanin | 8bc251aea2 | |
Piero Toffanin | c32a8a5c59 | |
Piero Toffanin | f75a87977e | |
Piero Toffanin | e329c9a77b | |
rexliuser | be1fec2bd7 | |
Adrien-ANTON-LUDWIG | 87f82a1582 | |
Adrien-ANTON-LUDWIG | 9b9ba724c6 | |
Adrien-ANTON-LUDWIG | ee5ff3258f | |
Piero Toffanin | 80fd9dffdc | |
fr-damo | df0ea97321 | |
Piero Toffanin | 967fec0974 | |
fr-damo | e1b5a5ef65 | |
Piero Toffanin | 8121fca607 | |
Piero Toffanin | 80c4ce517c | |
udaf-mcq | afd38f631d | |
Piero Toffanin | eb95137a4c | |
Sebastien | eb4f30651e | |
Piero Toffanin | cefcfde07d | |
Piero Toffanin | b620e4e6cc | |
Liuxuyang | 8a4a309ceb | |
Piero Toffanin | cfa689b5da | |
Piero Toffanin | 0b8c75ca10 | |
Piero Toffanin | 3a4b98a7eb | |
Piero Toffanin | c2ab760dd9 | |
Piero Toffanin | dee9feed17 | |
Piero Toffanin | 542dd6d053 | |
Piero Toffanin | 5deab15e5f | |
Piero Toffanin | 6d37355d6b | |
Piero Toffanin | ba1cc39adb | |
Piero Toffanin | 54b0ac9bb0 | |
Piero Toffanin | 12b8f43912 | |
Piero Toffanin | ad091fd9af | |
Piero Toffanin | a2e63508c2 | |
Piero Toffanin | bebea18697 | |
Piero Toffanin | 58c9fd2231 | |
Piero Toffanin | 567cc3c872 | |
Piero Toffanin | 59019dac66 | |
Piero Toffanin | ef1ea9a067 | |
Piero Toffanin | 9014912c98 | |
Piero Toffanin | ad100525b5 | |
Piero Toffanin | 6ebb8b50d7 | |
Piero Toffanin | 8c300ab4de | |
Piero Toffanin | 609abfd115 | |
Howard Butler | 607ce5ffa6 | |
Piero Toffanin | ce6c745715 | |
Piero Toffanin | 4dd4da20c3 | |
Piero Toffanin | adc0570c53 | |
Piero Toffanin | 552b45bce4 | |
Piero Toffanin | 27abb8bb10 | |
Piero Toffanin | 05e8323174 | |
Piero Toffanin | f172e91b7e | |
Piero Toffanin | ed07b18bad | |
Piero Toffanin | 3535c64347 | |
Piero Toffanin | b076b667a4 | |
Piero Toffanin | 8e735e01d3 | |
Piero Toffanin | 396dde0d2c | |
Piero Toffanin | 4c7c37bbd4 | |
Piero Toffanin | 182bcfa68f | |
Piero Toffanin | 5db0d0111d | |
Piero Toffanin | 80e4b4d649 | |
rexliuser | 4a26aa1c9c | |
Piero Toffanin | a922aaecbc | |
Stephen Mather | 7be148a90a | |
Stephen Mather | 3f1975b353 | |
Piero Toffanin | b8965b50db | |
Piero Toffanin | ffad2b02e8 | |
Piero Toffanin | 1ae7974019 | |
Piero Toffanin | c0d5e21d38 | |
Piero Toffanin | f82b6a1f82 | |
Antonio Eugenio Burriel | ca7abe165a | |
Antonio Eugenio Burriel | 0f595cab80 | |
Piero Toffanin | d340d8601d | |
Stephen Mather | 14048cc049 | |
Piero Toffanin | f7c87172e9 | |
Piero Toffanin | c34f227157 | |
Piero Toffanin | 7aade078ad | |
Piero Toffanin | ac89d2212e | |
Piero Toffanin | cdf876a46b | |
Piero Toffanin | 8c0e1b3173 | |
Piero Toffanin | f27b611c43 | |
Piero Toffanin | e736670094 | |
Piero Toffanin | f8cd626ae8 | |
Piero Toffanin | 21e9df61f7 | |
Piero Toffanin | 2c8780c4d1 | |
Yunpeng Li | 1ea2a990e5 | |
Piero Toffanin | 706221c626 | |
Piero Toffanin | 02570ed632 | |
Piero Toffanin | 7048dd86fd | |
Piero Toffanin | bd0f33f978 | |
Piero Toffanin | 2361fce01d | |
Piero Toffanin | 6c94338a85 | |
Esteban | a2ee77b114 | |
Piero Toffanin | 6c32fc0594 | |
Esteban | a11992ab0f | |
Esteban | 9735c1cff8 | |
Piero Toffanin | 7bf91d1402 | |
Piero Toffanin | 4798aefc6a | |
Esteban | 59df84f1a8 | |
Esteban | 749f90bc37 | |
Piero Toffanin | 91201d5842 | |
Piero Toffanin | 41020ef1a8 | |
Piero Toffanin | 51feb49d09 | |
Piero Toffanin | f60dc33df0 | |
Piero Toffanin | c4874df8cb | |
Luca Di Leo | f89ddfb1bd | |
Luca Di Leo | d013539275 | |
Luca Di Leo | 3942755b10 | |
Luca Di Leo | d2ad5bac49 | |
Piero Toffanin | 4b3306ec9e | |
Piero Toffanin | 1d4827dd32 | |
Piero Toffanin | 02e4851230 | |
Piero Toffanin | 976db04148 | |
Piero Toffanin | 7cbe959da6 | |
Piero Toffanin | 6a7ab131ca | |
Piero Toffanin | b404366725 | |
Piero Toffanin | 4aa83c9956 | |
Piero Toffanin | 61483d9287 | |
Piero Toffanin | ddc1bb26b1 | |
Piero Toffanin | b0040f8f34 | |
Piero Toffanin | 61cff70be6 | |
Piero Toffanin | 24575bb25c | |
Piero Toffanin | ec6af4aa04 | |
Piero Toffanin | a2698f3ec9 | |
Piero Toffanin | 73887c6bcf | |
Piero Toffanin | 7bd81a93a2 | |
lurenzzzz | c85c54f505 | |
Piero Toffanin | 066e5bebb4 | |
HeDo | 6b0f8f62ff | |
HeDo | be142549e3 | |
Piero Toffanin | ffa7871c33 | |
Piero Toffanin | 473d496620 | |
Luca Di Leo | 7ace79cdc4 | |
Luca Di Leo | 31bfa95f19 | |
Luca Di Leo | fa3eb4af96 | |
HeDo | 02b92d322c | |
HeDo | 240ab7b108 | |
HeDo | 93be23b9ba | |
HeDo | c5f67024d1 | |
HeDo | 08b2755c6c | |
Piero Toffanin | 5ac36051a4 | |
Piero Toffanin | 266db75e36 | |
Piero Toffanin | b392c7a09d | |
Piero Toffanin | 22464d85f3 | |
zfb132 | 197981440b | |
HeDo | 34311a2380 | |
HeDo | 7c855688a1 | |
Piero Toffanin | 5259fd7007 | |
Piero Toffanin | 4f660ffd44 | |
Piero Toffanin | a58c50a663 | |
Piero Toffanin | 3d4725c615 | |
Piero Toffanin | 4a0b60bf70 | |
Piero Toffanin | 92cab06a51 | |
Esteban | 8f7755d4f5 | |
Esteban | 952cdf8b4b | |
Piero Toffanin | 74fcfe0e44 | |
Piero Toffanin | 0d6d2e6631 | |
Piero Toffanin | f72e9cc259 | |
Piero Toffanin | cd31933002 | |
Piero Toffanin | 6a72cb011f | |
Piero Toffanin | 759c2dbfba | |
Piero Toffanin | 09bf59ab87 | |
ckato | 91959bf299 | |
Piero Toffanin | 0289ab5062 |

@@ -0,0 +1,33 @@ (new file)
```yaml
name: Issue Triage
on:
  issues:
    types:
      - opened
jobs:
  issue_triage:
    runs-on: ubuntu-latest
    permissions:
      issues: write
    steps:
    - uses: pierotofy/issuewhiz@v1
      with:
        ghToken: ${{ secrets.GITHUB_TOKEN }}
        openAI: ${{ secrets.OPENAI_TOKEN }}
        filter: |
          - "#"
        variables: |
          - Q: "A question about using a software or seeking guidance on doing something?"
          - B: "Reporting an issue or a software bug?"
          - P: "Describes an issue with processing a set of images or a particular dataset?"
          - D: "Contains a link to a dataset or images?"
          - E: "Contains a suggestion for an improvement or a feature request?"
          - SC: "Describes an issue related to compiling or building source code?"
        logic: |
          - 'Q and (not B) and (not P) and (not E) and (not SC) and not (title_lowercase ~= ".*bug: .+")': [comment: "Could we move this conversation over to the forum at https://community.opendronemap.org? The forum is the right place to ask questions (we try to keep the GitHub issue tracker for feature requests and bugs only). Thank you!", close: true, stop: true]
          - "B and (not P) and (not E) and (not SC)": [label: "software fault", stop: true]
          - "P and D": [label: "possible software fault", stop: true]
          - "P and (not D) and (not SC) and (not E)": [comment: "Thanks for the report, but it looks like you didn't include a copy of your dataset for us to reproduce this issue? Please make sure to follow our [issue guidelines](https://github.com/OpenDroneMap/ODM/blob/master/docs/issue_template.md) :pray: ", close: true, stop: true]
          - "E": [label: enhancement, stop: true]
          - "SC": [label: "possible software fault"]

        signature: "p.s. I'm just an automated script, not a human being."
```
@@ -1,98 +0,0 @@ (file removed)
```yaml
name: Publish Docker and WSL Images

on:
  push:
    branches:
      - master
    tags:
      - v*

jobs:
  build:
    runs-on: self-hosted
    timeout-minutes: 2880
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set Swap Space
        uses: pierotofy/set-swap-space@master
        with:
          swap-size-gb: 12
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          config-inline: |
            [worker.oci]
              max-parallelism = 1
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      # Use the repository information of the checked-out code to format docker tags
      - name: Docker meta
        id: docker_meta
        uses: crazy-max/ghaction-docker-meta@v1
        with:
          images: opendronemap/odm
          tag-semver: |
            {{version}}
      - name: Build and push Docker image
        id: docker_build
        uses: docker/build-push-action@v2
        with:
          file: ./portable.Dockerfile
          platforms: linux/amd64,linux/arm64
          push: true
          no-cache: true
          tags: |
            ${{ steps.docker_meta.outputs.tags }}
            opendronemap/odm:latest
      - name: Export WSL image
        id: wsl_export
        run: |
          docker pull opendronemap/odm
          docker export $(docker create opendronemap/odm) --output odm-wsl-rootfs-amd64.tar.gz
          gzip odm-wsl-rootfs-amd64.tar.gz
          echo ::set-output name=amd64-rootfs::"odm-wsl-rootfs-amd64.tar.gz"
      # Convert tag into a GitHub Release if we're building a tag
      - name: Create Release
        if: github.event_name == 'tag'
        id: create_release
        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ github.ref }}
          release_name: Release ${{ github.ref }}
          draft: false
          prerelease: false
      # Upload the WSL image to the new Release if we're building a tag
      - name: Upload amd64 Release Asset
        if: github.event_name == 'tag'
        id: upload-amd64-wsl-rootfs
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing its ID to get its outputs object, which includes an `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
          asset_path: ./${{ steps.wsl_export.outputs.amd64-rootfs }}
          asset_name: ${{ steps.wsl_export.outputs.amd64-rootfs }}
          asset_content_type: application/gzip
      # Always archive the WSL rootfs
      - name: Upload amd64 Artifact
        uses: actions/upload-artifact@v2
        with:
          name: wsl-rootfs
          path: ${{ steps.wsl_export.outputs.amd64-rootfs }}
      - name: Docker image digest and WSL rootfs download URL
        run: |
          echo "Docker image digest: ${{ steps.docker_build.outputs.digest }}"
          echo "WSL AMD64 rootfs URL: ${{ steps.upload-amd64-wsl-rootfs.browser_download_url }}"
      # Trigger NodeODM build
      - name: Dispatch NodeODM Build Event
        id: nodeodm_dispatch
        run: |
          curl -X POST -u "${{secrets.PAT_USERNAME}}:${{secrets.PAT_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/OpenDroneMap/NodeODM/actions/workflows/publish-docker.yaml/dispatches --data '{"ref": "master"}'
```
```diff
@@ -9,14 +9,11 @@ on:
 jobs:
   build:
-    runs-on: ubuntu-latest
+    runs-on: self-hosted
+    timeout-minutes: 2880
     steps:
     - name: Checkout
       uses: actions/checkout@v2
-    - name: Set Swap Space
-      uses: pierotofy/set-swap-space@master
-      with:
-        swap-size-gb: 12
     - name: Set up QEMU
       uses: docker/setup-qemu-action@v1
     - name: Set up Docker Buildx
```
@@ -0,0 +1,53 @@ (new file)
```yaml
name: Publish Docker and WSL Images

on:
  push:
    branches:
      - master
    tags:
      - v*

jobs:
  build:
    runs-on: self-hosted
    timeout-minutes: 2880
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          config-inline: |
            [worker.oci]
              max-parallelism = 1
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      # Use the repository information of the checked-out code to format docker tags
      - name: Docker meta
        id: docker_meta
        uses: crazy-max/ghaction-docker-meta@v1
        with:
          images: opendronemap/odm
          tag-semver: |
            {{version}}
      - name: Build and push Docker image
        id: docker_build
        uses: docker/build-push-action@v2
        with:
          file: ./portable.Dockerfile
          platforms: linux/amd64,linux/arm64
          push: true
          no-cache: true
          tags: |
            ${{ steps.docker_meta.outputs.tags }}
            opendronemap/odm:latest
      # Trigger NodeODM build
      - name: Dispatch NodeODM Build Event
        id: nodeodm_dispatch
        run: |
          curl -X POST -u "${{secrets.PAT_USERNAME}}:${{secrets.PAT_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/OpenDroneMap/NodeODM/actions/workflows/publish-docker.yaml/dispatches --data '{"ref": "master"}'
```
@@ -1,51 +0,0 @@ (file removed)
```yaml
name: Publish Snap

on:
  push:
    branches:
      - master
    tags:
      - v**

jobs:
  build-and-release:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        architecture:
          - amd64
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set Swap Space
        uses: pierotofy/set-swap-space@master
        with:
          swap-size-gb: 12
      - name: Build
        id: build
        uses: diddlesnaps/snapcraft-multiarch-action@v1
        with:
          architecture: ${{ matrix.architecture }}
      - name: Publish unstable builds to Edge
        if: github.ref == 'refs/heads/master'
        uses: snapcore/action-publish@v1
        with:
          store_login: ${{ secrets.STORE_LOGIN }}
          snap: ${{ steps.build.outputs.snap }}
          release: edge
      - name: Publish tagged prerelease builds to Beta
        # These are identified by having a hyphen in the tag name, e.g.: v1.0.0-beta1
        if: startsWith(github.ref, 'refs/tags/v') && contains(github.ref, '-')
        uses: snapcore/action-publish@v1
        with:
          store_login: ${{ secrets.STORE_LOGIN }}
          snap: ${{ steps.build.outputs.snap }}
          release: beta
      - name: Publish tagged stable or release-candidate builds to Candidate
        # These are identified by NOT having a hyphen in the tag name, OR having "-RC" or "-rc" in the tag name.
        if: startsWith(github.ref, 'refs/tags/v1') && ( ( ! contains(github.ref, '-') ) || contains(github.ref, '-RC') || contains(github.ref, '-rc') )
        uses: snapcore/action-publish@v1
        with:
          store_login: ${{ secrets.STORE_LOGIN }}
          snap: ${{ steps.build.outputs.snap }}
          release: candidate
```
```diff
@@ -38,6 +38,11 @@ jobs:
       - name: Build sources
         run: |
           python configure.py build
+      - name: Free up space
+        run: |
+          rmdir SuperBuild\download /s /q
+          rmdir SuperBuild\build /s /q
+        shell: cmd
       - name: Create setup
         env:
           CODE_SIGN_CERT_PATH: ${{ steps.code_sign.outputs.filePath }}
```
```diff
@@ -58,17 +58,30 @@ jobs:
         with:
           python-version: '3.8.1'
           architecture: 'x64'
+      - uses: Jimver/cuda-toolkit@v0.2.4
+        id: cuda-toolkit
+        with:
+          cuda: '11.4.0'
+      - name: Setup cmake
+        uses: jwlawson/actions-setup-cmake@v1.13
+        with:
+          cmake-version: '3.24.x'
       - name: Setup Visual C++
         uses: ilammy/msvc-dev-cmd@v1
         with:
           arch: x64
       - name: Install venv
         run: |
           python -m pip install virtualenv
       - name: Build sources
         run: |
           python configure.py build
+      - name: Free up space
+        run: |
+          rmdir SuperBuild\download /s /q
+          rmdir SuperBuild\build /s /q
+        shell: cmd
       - name: Create setup
         run: |
           python configure.py dist
       - name: Upload Setup File
         uses: actions/upload-artifact@v2
         with:
           name: Setup
           path: dist\*.exe
```
```diff
@@ -28,3 +28,10 @@ settings.yaml
 __pycache__
 *.snap
 storage/
+
+
+vcpkg/
+venv/
+python38/
+dist/
+innosetup/
```
README.md (86 changed lines)
````diff
@@ -83,30 +83,6 @@ ODM can be installed natively on Windows. Just download the latest setup from th
 run C:\Users\youruser\datasets\project [--additional --parameters --here]
 ```
 
-## Snap Package
-
-ODM is now available as a Snap Package from the Snap Store. To install you may use the Snap Store (available itself as a Snap Package) or the command line:
-
-```bash
-sudo snap install --edge opendronemap
-```
-
-To run, you will need a terminal window into which you can type:
-
-```bash
-opendronemap
-
-# or
-
-snap run opendronemap
-
-# or
-
-/snap/bin/opendronemap
-```
-
-Snap packages will be kept up-to-date automatically, so you don't need to update ODM manually.
-
 ## GPU Acceleration
 
 ODM has support for doing SIFT feature extraction on a GPU, which is about 2x faster than the CPU on a typical consumer laptop. To use this feature, you need to use the `opendronemap/odm:gpu` docker image instead of `opendronemap/odm` and you need to pass the `--gpus all` flag:
````
````diff
@@ -147,52 +123,6 @@ You're in good shape!
 
 See https://github.com/NVIDIA/nvidia-docker and https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker for information on docker/NVIDIA setup.
 
-## WSL or WSL2 Install
-
-Note: This requires that you have installed WSL already by following [the instructions on Microsoft's Website](https://docs.microsoft.com/en-us/windows/wsl/install-win10).
-
-You can run ODM via WSL or WSL2 by downloading the `rootfs.tar.gz` file from [the releases page on GitHub](https://github.com/OpenDroneMap/ODM/releases). Once you have the file saved to your `Downloads` folder in Windows, open a PowerShell or CMD window by right-clicking the Flag Menu (bottom left by default) and selecting "Windows PowerShell", or alternatively by using the [Windows Terminal from the Windows Store](https://www.microsoft.com/store/productId/9N0DX20HK701).
-
-Inside a PowerShell window, or Windows Terminal running PowerShell, type the following:
-
-```powershell
-# PowerShell
-wsl.exe --import ODM $env:APPDATA\ODM C:\path\to\your\Downloads\rootfs.tar.gz
-```
-
-Alternatively if you're using `CMD.exe` or the `CMD` support in Windows Terminal type:
-
-```cmd
-# CMD
-wsl.exe --import ODM %APPDATA%\ODM C:\path\to\your\Downloads\rootfs.tar.gz
-```
-
-In either case, make sure you replace `C:\path\to\your\Downloads\rootfs.tar.gz` with the actual path to your `rootfs.tar.gz` file.
-
-This will save a new Hard Disk image to your Windows `AppData` folder at `C:\Users\username\AppData\roaming\ODM` (where `username` is your Username in Windows), and will set up a new WSL "distro" called `ODM`.
-
-You may start the ODM distro by using the relevant option in the Windows Terminal (from the Windows Store) or by executing `wsl.exe -d ODM` in a PowerShell or CMD window.
-
-ODM is installed to the distro's `/code` directory. You may execute it with:
-
-```bash
-/code/run.sh
-```
-
-### Updating ODM in WSL
-
-The easiest way to update the installation of ODM is to download the new `rootfs.tar.gz` file and import it as another distro. You may then unregister the original instance the same way you delete ODM from WSL (see next heading).
-
-### Deleting an ODM in WSL instance
-
-```cmd
-wsl.exe --unregister ODM
-```
-
-Finally you'll want to delete the files by using your Windows File Manager (Explorer) to navigate to `%APPDATA%`, find the `ODM` directory, and delete it by dragging it to the recycle bin. To permanently delete it, empty the recycle bin.
-
-If you have installed to a different directory by changing the `--import` command you ran to install, you must use that directory name to delete the correct files. This is likely the case if you have multiple ODM installations or are updating an existing installation.
-
 ## Native Install (Ubuntu 21.04)
 
 You can run ODM natively on Ubuntu 21.04 (although we don't recommend it):
````
````diff
@@ -261,12 +191,14 @@ After this, you must restart docker.
 
 ## Video Support
 
-Starting from version 3.0.4, ODM can automatically extract images from video files (.mp4 or .mov). Just place one or more video files into the `images` folder and run the program as usual. Subtitles files (.srt) with GPS information are also supported. Place .srt files in the `images` folder, making sure that the filenames match. For example, `my_video.mp4` ==> `my_video.srt` (case-sensitive).
+Starting from version 3.0.4, ODM can automatically extract images from video files (.mp4, .mov, .lrv, .ts). Just place one or more video files into the `images` folder and run the program as usual. Subtitles files (.srt) with GPS information are also supported. Place .srt files in the `images` folder, making sure that the filenames match. For example, `my_video.mp4` ==> `my_video.srt` (case-sensitive).
 
 ## Developers
 
 Help improve our software! We welcome contributions from everyone, whether to add new features, improve speed, fix existing bugs or add support for more cameras. Check our [code of conduct](https://github.com/OpenDroneMap/documents/blob/master/CONDUCT.md), the [contributing guidelines](https://github.com/OpenDroneMap/documents/blob/master/CONTRIBUTING.md) and [how decisions are made](https://github.com/OpenDroneMap/documents/blob/master/GOVERNANCE.md#how-decisions-are-made).
 
 ### Installation and first run
 
 For Linux users, the easiest way to modify the software is to make sure docker is installed, clone the repository and then run from a shell:
 
 ```bash
````
````diff
@@ -285,6 +217,18 @@ You can now make changes to the ODM source. When you are ready to test the chang
 ```bash
 (odmdev) [user:/code] master+* ± ./run.sh --project-path /datasets mydataset
 ```
+
+### Stop the dev container
+
+```bash
+docker stop odmdev
+```
+
+### To come back to the dev environment
+
+Change your_username to your username:
+
+```bash
+docker start odmdev
+docker exec -ti odmdev bash
+su your_username
+```
 
 If you have questions, join the developer's chat at https://community.opendronemap.org/c/developers-chat/21
````
```diff
@@ -26,11 +26,21 @@ if (APPLE)
     # Use homebrew's clang compiler since Apple
     # does not allow us to link to libomp
     set(CXX_PATH ${HOMEBREW_INSTALL_PREFIX}/bin/c++-12)
+    set(APPLE_CMAKE_ARGS "")
 
     message("Checking for ${CXX_PATH}...")
     if(EXISTS "${CXX_PATH}")
-        message("Found Homebrew's compiler: ${CXX_PATH}")
+        message("Found Homebrew's C++ compiler: ${CXX_PATH}")
         set(CMAKE_CXX_COMPILER ${CXX_PATH})
-        set(APPLE_CMAKE_ARGS "-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}")
+        list(APPEND APPLE_CMAKE_ARGS "-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}")
     endif()
+
+    set(C_PATH ${HOMEBREW_INSTALL_PREFIX}/bin/gcc-12)
+    message("Checking for ${C_PATH}...")
+    if(EXISTS "${C_PATH}")
+        message("Found Homebrew's C compiler: ${C_PATH}")
+        set(CMAKE_C_COMPILER ${C_PATH})
+        list(APPEND APPLE_CMAKE_ARGS "-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}")
+    endif()
 
     if (NOT APPLE_CMAKE_ARGS)
```
```diff
@@ -132,7 +142,7 @@ SETUP_EXTERNAL_PROJECT(OpenCV ${ODM_OpenCV_Version} ${ODM_BUILD_OpenCV})
 # ---------------------------------------------------------------------------------------------
 # Google Flags library (GFlags)
 #
-set(ODM_GFlags_Version 2.1.2)
+set(ODM_GFlags_Version 2.2.2)
 option(ODM_BUILD_GFlags "Force to build GFlags library" OFF)
 
 SETUP_EXTERNAL_PROJECT(GFlags ${ODM_GFlags_Version} ${ODM_BUILD_GFlags})
```
```diff
@@ -167,6 +177,9 @@ set(custom_libs OpenSfM
                 FPCFilter
                 PyPopsift
                 Obj2Tiles
+                OpenPointClass
+                ExifTool
+                RenderDEM
 )
 
 externalproject_add(mve
```
```diff
@@ -210,7 +223,7 @@ externalproject_add(poissonrecon
 
 externalproject_add(dem2mesh
   GIT_REPOSITORY https://github.com/OpenDroneMap/dem2mesh.git
-  GIT_TAG 300
+  GIT_TAG 334
   PREFIX ${SB_BINARY_DIR}/dem2mesh
   SOURCE_DIR ${SB_SOURCE_DIR}/dem2mesh
   CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
```
```diff
@@ -231,13 +244,22 @@ externalproject_add(dem2points
 externalproject_add(odm_orthophoto
   DEPENDS opencv
   GIT_REPOSITORY https://github.com/OpenDroneMap/odm_orthophoto.git
-  GIT_TAG 290
+  GIT_TAG 317
   PREFIX ${SB_BINARY_DIR}/odm_orthophoto
   SOURCE_DIR ${SB_SOURCE_DIR}/odm_orthophoto
   CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
              ${WIN32_CMAKE_ARGS} ${WIN32_GDAL_ARGS}
 )
 
+externalproject_add(fastrasterfilter
+  GIT_REPOSITORY https://github.com/OpenDroneMap/FastRasterFilter.git
+  GIT_TAG main
+  PREFIX ${SB_BINARY_DIR}/fastrasterfilter
+  SOURCE_DIR ${SB_SOURCE_DIR}/fastrasterfilter
+  CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
+             ${WIN32_CMAKE_ARGS} ${WIN32_GDAL_ARGS}
+)
+
 externalproject_add(lastools
   GIT_REPOSITORY https://github.com/OpenDroneMap/LAStools.git
   GIT_TAG 250
```
@@ -0,0 +1,38 @@ (new file)
```cmake
set(_proj_name exiftool)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")

if (WIN32)
  ExternalProject_Add(${_proj_name}
    PREFIX ${_SB_BINARY_DIR}
    TMP_DIR ${_SB_BINARY_DIR}/tmp
    STAMP_DIR ${_SB_BINARY_DIR}/stamp
    #--Download step--------------
    DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
    URL https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/exiftool.zip
    SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
    UPDATE_COMMAND ""
    CONFIGURE_COMMAND ""
    BUILD_IN_SOURCE 1
    BUILD_COMMAND ""
    INSTALL_COMMAND ${CMAKE_COMMAND} -E copy ${SB_SOURCE_DIR}/${_proj_name}/exiftool.exe ${SB_INSTALL_DIR}/bin
    #--Output logging-------------
    LOG_DOWNLOAD OFF
    LOG_CONFIGURE OFF
    LOG_BUILD OFF
  )
else()
  externalproject_add(${_proj_name}
    PREFIX ${_SB_BINARY_DIR}
    TMP_DIR ${_SB_BINARY_DIR}/tmp
    STAMP_DIR ${_SB_BINARY_DIR}/stamp
    SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
    #--Download step--------------
    DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
    URL https://github.com/exiftool/exiftool/archive/refs/tags/12.62.zip
    UPDATE_COMMAND ""
    CONFIGURE_COMMAND ""
    BUILD_IN_SOURCE 1
    BUILD_COMMAND perl Makefile.PL PREFIX=${SB_INSTALL_DIR} LIB=${SB_INSTALL_DIR}/bin/lib
    INSTALL_COMMAND make install && rm -fr ${SB_INSTALL_DIR}/man
  )
endif()
```
```diff
@@ -8,7 +8,7 @@ ExternalProject_Add(${_proj_name}
   #--Download step--------------
   DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
   GIT_REPOSITORY https://github.com/OpenDroneMap/FPCFilter
-  GIT_TAG main
+  GIT_TAG 331
   #--Update/Patch step----------
   UPDATE_COMMAND ""
   #--Configure step-------------
```
```diff
@@ -1,7 +1,7 @@
 set(_proj_name obj2tiles)
 set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")
 
-set(OBJ2TILES_VERSION v1.0.7)
+set(OBJ2TILES_VERSION v1.0.12)
 set(OBJ2TILES_EXT "")
 
 set(OBJ2TILES_ARCH "Linux64")
```
```diff
@@ -14,7 +14,7 @@ externalproject_add(vcg
 
 externalproject_add(eigen34
   GIT_REPOSITORY https://gitlab.com/libeigen/eigen.git
-  GIT_TAG 3.4
+  GIT_TAG 7176ae16238ded7fb5ed30a7f5215825b3abd134
   UPDATE_COMMAND ""
   SOURCE_DIR ${SB_SOURCE_DIR}/eigen34
   CONFIGURE_COMMAND ""
```
```diff
@@ -53,7 +53,7 @@ ExternalProject_Add(${_proj_name}
   #--Download step--------------
   DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
   GIT_REPOSITORY https://github.com/OpenDroneMap/openMVS
-  GIT_TAG 301
+  GIT_TAG 320
   #--Update/Patch step----------
   UPDATE_COMMAND ""
   #--Configure step-------------
```
@@ -0,0 +1,33 @@ (new file)
```cmake
set(_proj_name openpointclass)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")

ExternalProject_Add(${_proj_name}
  DEPENDS pdal eigen34
  PREFIX ${_SB_BINARY_DIR}
  TMP_DIR ${_SB_BINARY_DIR}/tmp
  STAMP_DIR ${_SB_BINARY_DIR}/stamp
  #--Download step--------------
  DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
  GIT_REPOSITORY https://github.com/uav4geo/OpenPointClass
  GIT_TAG v1.1.3
  #--Update/Patch step----------
  UPDATE_COMMAND ""
  #--Configure step-------------
  SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
  CMAKE_ARGS
    -DPDAL_DIR=${SB_INSTALL_DIR}/lib/cmake/PDAL
    -DWITH_GBT=ON
    -DBUILD_PCTRAIN=OFF
    -DEIGEN3_INCLUDE_DIR=${SB_SOURCE_DIR}/eigen34/
    -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
    -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
    ${WIN32_CMAKE_ARGS}
  #--Build step-----------------
  BINARY_DIR ${_SB_BINARY_DIR}
  #--Install step---------------
  INSTALL_DIR ${SB_INSTALL_DIR}
  #--Output logging-------------
  LOG_DOWNLOAD OFF
  LOG_CONFIGURE OFF
  LOG_BUILD OFF
)
```
```diff
@@ -25,7 +25,7 @@ ExternalProject_Add(${_proj_name}
   #--Download step--------------
   DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
   GIT_REPOSITORY https://github.com/OpenDroneMap/OpenSfM/
-  GIT_TAG 304
+  GIT_TAG 330
   #--Update/Patch step----------
   UPDATE_COMMAND git submodule update --init --recursive
   #--Configure step-------------
```
@@ -1,55 +0,0 @@ (file removed)
```cmake
set(_proj_name pcl)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")

ExternalProject_Add(${_proj_name}
  PREFIX ${_SB_BINARY_DIR}
  TMP_DIR ${_SB_BINARY_DIR}/tmp
  STAMP_DIR ${_SB_BINARY_DIR}/stamp
  #--Download step--------------
  DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
  URL https://github.com/PointCloudLibrary/pcl/archive/refs/tags/pcl-1.11.1.zip
  #--Update/Patch step----------
  UPDATE_COMMAND ""
  #--Configure step-------------
  SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
  CMAKE_ARGS
    -DBUILD_features=OFF
    -DBUILD_filters=OFF
    -DBUILD_geometry=OFF
    -DBUILD_keypoints=OFF
    -DBUILD_outofcore=OFF
    -DBUILD_people=OFF
    -DBUILD_recognition=OFF
    -DBUILD_registration=OFF
    -DBUILD_sample_consensus=OFF
    -DBUILD_segmentation=OFF
    -DBUILD_features=OFF
    -DBUILD_surface_on_nurbs=OFF
    -DBUILD_tools=OFF
    -DBUILD_tracking=OFF
    -DBUILD_visualization=OFF
    -DWITH_OPENGL=OFF
    -DWITH_VTK=OFF
    -DWITH_QT=OFF
    -DBUILD_OPENNI=OFF
    -DBUILD_OPENNI2=OFF
    -DWITH_OPENNI=OFF
    -DWITH_OPENNI2=OFF
    -DWITH_FZAPI=OFF
    -DWITH_LIBUSB=OFF
    -DWITH_PCAP=OFF
    -DWITH_PXCAPI=OFF
    -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
    -DPCL_VERBOSITY_LEVEL=Error
    -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
    -DPCL_BUILD_WITH_FLANN_DYNAMIC_LINKING_WIN32=ON
    ${WIN32_CMAKE_ARGS}
  #--Build step-----------------
  BINARY_DIR ${_SB_BINARY_DIR}
  #--Install step---------------
  INSTALL_DIR ${SB_INSTALL_DIR}
  #--Output logging-------------
  LOG_DOWNLOAD OFF
  LOG_CONFIGURE OFF
  LOG_BUILD OFF
)
```
```diff
@@ -16,7 +16,7 @@ ExternalProject_Add(${_proj_name}
   STAMP_DIR ${_SB_BINARY_DIR}/stamp
   #--Download step--------------
   DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
-  URL https://github.com/PDAL/PDAL/archive/refs/tags/2.4.3.zip
+  URL https://github.com/OpenDroneMap/PDAL/archive/refs/heads/333.zip
   #--Update/Patch step----------
   UPDATE_COMMAND ""
   #--Configure step-------------
```
@@ -0,0 +1,30 @@ (new file)
```cmake
set(_proj_name renderdem)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")

ExternalProject_Add(${_proj_name}
  DEPENDS pdal
  PREFIX ${_SB_BINARY_DIR}
  TMP_DIR ${_SB_BINARY_DIR}/tmp
  STAMP_DIR ${_SB_BINARY_DIR}/stamp
  #--Download step--------------
  DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
  GIT_REPOSITORY https://github.com/OpenDroneMap/RenderDEM
  GIT_TAG main
  #--Update/Patch step----------
  UPDATE_COMMAND ""
  #--Configure step-------------
  SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
  CMAKE_ARGS
    -DPDAL_DIR=${SB_INSTALL_DIR}/lib/cmake/PDAL
    -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
    -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
    ${WIN32_CMAKE_ARGS}
  #--Build step-----------------
  BINARY_DIR ${_SB_BINARY_DIR}
  #--Install step---------------
  INSTALL_DIR ${SB_INSTALL_DIR}
  #--Output logging-------------
  LOG_DOWNLOAD OFF
  LOG_CONFIGURE OFF
  LOG_BUILD OFF
)
```
```diff
@@ -9,7 +9,7 @@ ExternalProject_Add(${_proj_name}
   #--Download step--------------
   DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
   GIT_REPOSITORY https://github.com/OpenDroneMap/untwine/
-  GIT_TAG 285
+  GIT_TAG 317
   #--Update/Patch step----------
   UPDATE_COMMAND ""
   #--Configure step-------------
```
VERSION (2 changed lines)

```diff
@@ -1 +1 @@
-3.0.4
+3.5.1
```
|
@ -135,10 +135,13 @@ def clean():
|
|||
safe_remove(os.path.join("SuperBuild", "install"))
|
||||
|
||||
def dist():
|
||||
if not os.path.exists("SuperBuild\\download"):
|
||||
if not os.path.exists("SuperBuild\\install"):
|
||||
print("You need to run configure.py build before you can run dist")
|
||||
exit(1)
|
||||
|
||||
if not os.path.exists("SuperBuild\\download"):
|
||||
os.mkdir("SuperBuild\\download")
|
||||
|
||||
# Download VC++ runtime
|
||||
vcredist_path = os.path.join("SuperBuild", "download", "vc_redist.x64.zip")
|
||||
if not os.path.isfile(vcredist_path):
|
||||
|
```diff
@@ -190,7 +193,7 @@ def dist():
         z.extractall("innosetup")
 
     # Run
-    cs_flags = ""
+    cs_flags = '/DSKIP_SIGN=1'
     if args.code_sign_cert_path:
         cs_flags = '"/Ssigntool=%s sign /f %s /fd SHA1 /t http://timestamp.sectigo.com $f"' % (signtool_path, args.code_sign_cert_path)
     run("innosetup\\iscc /Qp " + cs_flags + " \"innosetup.iss\"")
```
```diff
@@ -127,6 +127,9 @@ installreqs() {
     installdepsfromsnapcraft build openmvs
 
+    set -e
+
+    # edt requires numpy to build
+    pip install --ignore-installed numpy==1.23.1
     pip install --ignore-installed -r requirements.txt
     #if [ ! -z "$GPU_INSTALL" ]; then
     #fi
```
```diff
@@ -51,9 +51,9 @@ install() {
     cmake .. && make -j$processes
 
     cd /tmp
-    pip download GDAL==3.5.1
-    tar -xpzf GDAL-3.5.1.tar.gz
-    cd GDAL-3.5.1
+    pip download GDAL==3.6.2
+    tar -xpzf GDAL-3.6.2.tar.gz
+    cd GDAL-3.6.2
     if [ -e /opt/homebrew/bin/gdal-config ]; then
         python setup.py build_ext --gdal-config /opt/homebrew/bin/gdal-config
     else
```
```diff
@@ -61,7 +61,7 @@ install() {
     fi
     python setup.py build
     python setup.py install
-    rm -fr /tmp/GDAL-3.5.1 /tmp/GDAL-3.5.1.tar.gz
+    rm -fr /tmp/GDAL-3.6.2 /tmp/GDAL-3.6.2.tar.gz
 
     cd ${RUNPATH}
```
@@ -0,0 +1,26 @@ (new file)
````markdown
# exif_binner.py

Bins multispectral drone images by spectral band, using EXIF data. Also verifies that each bin is complete (i.e. contains all expected bands) and can log errors to a CSV file. Excludes RGB images by default.

## Requirements

- [Pillow](https://pillow.readthedocs.io/en/stable/installation.html) library for reading images and EXIF data.
- [tqdm](https://github.com/tqdm/tqdm#installation) for progress bars - can be removed

## Usage

```
exif_binner.py <args> <path to folder of images to rename> <output folder>
```

Optional arguments:

- `-b`/`--bands <integer>`: Number of expected bands per capture. Default: `5`
- `-s`/`--sequential <True/False>`: Use sequential capture group in filenames rather than original capture ID. Default: `True`
- `-z`/`--zero_pad <integer>`: If using sequential capture groups, zero-pad the group number to this many digits. 0 for no padding, -1 for auto padding. Default: `5`
- `-w`/`--whitespace_replace <string>`: Replace whitespace characters with this character. Default: `-`
- `-l`/`--logfile <filename>`: Write processed image metadata to this CSV file
- `-r`/`--replace_filename <string>`: Use this instead of the original filename in new filenames.
- `-f`/`--force`: Do not ask for processing confirmation.
- `-g`/`--no_grouping`: Do not apply grouping, only validate and add band name.
- Show these on the command line with `-h`/`--help`.
````
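
For reference, a hypothetical invocation (the folder and file names are illustrative, not taken from the diff) that bins a five-band multispectral set and logs per-image results to CSV:

```
exif_binner.py -b 5 -l bin_log.csv ./multispec_photos ./binned
```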
@@ -0,0 +1,210 @@ (new file)
```python
#!/usr/bin/env python3

# Originally developed by Ming Chia at the Australian Plant Phenomics Facility (Australian National University node)

# Usage:
# exif_binner.py <args> <path to folder of images to rename> <output folder>

# standard libraries
import sys
import os
import shutil
import re
import csv
import math
import argparse

# other imports
import PIL
from PIL import Image, ExifTags
from tqdm import tqdm  # optional: see "swap with this for no tqdm" below

parser = argparse.ArgumentParser()

# required args
parser.add_argument("file_dir", help="input folder of images")
parser.add_argument("output_dir", help="output folder to copy images to")

# args with defaults
parser.add_argument("-b", "--bands", help="number of expected bands per capture", type=int, default=5)
parser.add_argument("-s", "--sequential", help="use sequential capture group in filenames rather than original capture ID", type=bool, default=True)
parser.add_argument("-z", "--zero_pad", help="if using sequential capture groups, zero-pad the group number to this many digits. 0 for no padding, -1 for auto padding", type=int, default=5)
parser.add_argument("-w", "--whitespace_replace", help="replace whitespace characters with this character", type=str, default="-")

# optional args no defaults
parser.add_argument("-l", "--logfile", help="write image metadata used to this CSV file", type=str)
parser.add_argument("-r", "--replace_filename", help="use this instead of using the original filename in new filenames", type=str)
parser.add_argument("-f", "--force", help="don't ask for confirmation", action="store_true")
parser.add_argument("-g", "--no_grouping", help="do not apply grouping, only validate and add band name", action="store_true")
args = parser.parse_args()

file_dir = args.file_dir
output_dir = args.output_dir
replacement_character = args.whitespace_replace
expected_bands = args.bands
logfile = args.logfile

output_valid = os.path.join(output_dir, "valid")
output_invalid = os.path.join(output_dir, "invalid")

file_count = len(os.listdir(file_dir))

auto_zero_pad = len(str(math.ceil(float(file_count) / float(expected_bands))))

if args.zero_pad >= 1:
    if int("9" * args.zero_pad) < math.ceil(float(file_count) / float(expected_bands)):
        raise ValueError("Zero pad must have more digits than maximum capture groups! Attempted to pad " + str(args.zero_pad) + " digits with "
                         + str(file_count) + " files and " + str(expected_bands) + " bands (up to " + str(math.ceil(float(file_count) / float(expected_bands)))
                         + " capture groups possible, try at least " + str(auto_zero_pad) + " digits to zero pad)")

if args.force is False:
    print("Input dir: " + str(file_dir) + " (" + str(file_count) + " files)")
    print("Output folder: " + str(output_dir))
    if args.replace_filename:
        print("Replacing all basic filenames with: " + args.replace_filename)
    else:
        print("Replace whitespace in filenames with: " + replacement_character)
    print("Number of expected bands: " + str(expected_bands))
    if logfile:
        print("Save image processing metadata to: " + logfile)
    confirmation = input("Confirm processing [Y/N]: ")
    if confirmation.lower() in ["y"]:
        pass
    else:
        sys.exit()

no_exif_n = 0

images = []

print("Indexing images ...")

# for filename in os.listdir(file_dir):  # swap with this for no tqdm
for filename in tqdm(os.listdir(file_dir)):
    old_path = os.path.join(file_dir, filename)
    file_name, file_ext = os.path.splitext(filename)
    image_entry = {"name": filename, "valid": True, "band": "-", "ID": "-", "group": 0, "DateTime": "-", "error": "-"}  # dashes to ensure CSV exports properly, can be blank
    try:
        img = Image.open(old_path)
    except PIL.UnidentifiedImageError as img_err:
        # if it tries importing a file it can't read as an image
        # uncomment to print errors
        # sys.stderr.write(str(img_err) + "\n")
        no_exif_n += 1
        if logfile:
            image_entry["valid"] = False
            image_entry["error"] = "Not readable as image: " + str(img_err)
            images.append(image_entry)
        continue
    for key, val in img.getexif().items():
        if key in ExifTags.TAGS:
            # print(ExifTags.TAGS[key] + ":" + str(val))  # debugging
            if ExifTags.TAGS[key] == "XMLPacket":
                # find bandname
                bandname_start = val.find(b'<Camera:BandName>')
                bandname_end = val.find(b'</Camera:BandName>')
                bandname_coded = val[(bandname_start + 17):bandname_end]
                bandname = bandname_coded.decode("UTF-8")
                image_entry["band"] = str(bandname)
                # find capture ID
                image_entry["ID"] = re.findall('CaptureUUID="([^"]*)"', str(val))[0]
            if ExifTags.TAGS[key] == "DateTime":
                image_entry["DateTime"] = str(val)
    image_entry["band"].replace(" ", "-")
    if len(image_entry["band"]) >= 99:  # if it's too long, wrong value (RGB pic has none)
        # no exif present
        no_exif_n += 1
        image_entry["valid"] = False
        image_entry["error"] = "Image band name appears to be too long"
    elif image_entry["ID"] == "" and expected_bands > 1:
        no_exif_n += 1
        image_entry["valid"] = False
        image_entry["error"] = "No Capture ID found"
    if (file_ext.lower() in [".jpg", ".jpeg"]) and (image_entry["band"] == "-"):  # hack for DJI RGB jpgs
        # handle = open(old_path, 'rb').read()
        # xmp_start = handle.find(b'<x:xmpmeta')
        # xmp_end = handle.find(b'</x:xmpmeta')
        # xmp_bit = handle[xmp_start:xmp_end + 12]
        # image_entry["ID"] = re.findall('CaptureUUID="([^"]*)"', str(xmp_bit))[0]
        # image_entry["band"] = "RGB"  # TODO: we assume this. may not hold true for all datasets

        no_exif_n += 1  # this is just to keep a separate invalid message, comment out this whole if block and the jpgs should be handled by the "no capture ID" case
        image_entry["valid"] = False
        image_entry["error"] = "RGB jpg, not counting for multispec processing"
    images.append(image_entry)
    # print(new_path)  # debugging

print(str(no_exif_n) + " files were not multispectral images")
no_matching_bands_n = 0
new_capture_id = 1
capture_ids = {}

images = sorted(images, key=lambda img: (img["DateTime"], img["name"]))

# now sort and identify valid entries
if not args.no_grouping:
    # for this_img in images:  # swap with this for no tqdm
    for this_img in tqdm(images):
        if not this_img["valid"]:  # prefiltered in last loop
            continue
        same_id_images = [image for image in images if image["ID"] == this_img["ID"]]
        if len(same_id_images) != expected_bands:  # defaults to True, so only need to filter out not in
            no_matching_bands_n += 1
            this_img["valid"] = False
            this_img["error"] = "Capture ID has too few/too many bands"
        else:
            if this_img["ID"] in capture_ids.keys():
                this_img["group"] = capture_ids[this_img["ID"]]
            else:
                capture_ids[this_img["ID"]] = new_capture_id
                this_img["group"] = capture_ids[this_img["ID"]]  # a little less efficient but we know it works this way
                new_capture_id += 1
    print(str(no_matching_bands_n) + " images had unexpected bands in same capture")

os.makedirs(output_valid, exist_ok=True)
os.makedirs(output_invalid, exist_ok=True)

identifier = ""

# then do the actual copy
# for this_img in images:  # swap with this for no tqdm
for this_img in tqdm(images):
    old_path = os.path.join(file_dir, this_img["name"])
    file_name, file_ext = os.path.splitext(this_img["name"])

    if args.whitespace_replace:
        file_name = replacement_character.join(file_name.split())
    if args.replace_filename and not args.no_grouping:
        file_name = args.replace_filename

    if this_img["valid"]:
        prefix = output_valid
        if args.no_grouping:
            file_name_full = file_name + "-" + this_img["band"] + file_ext
        else:
            # set ID based on args
            if args.sequential:
                if args.zero_pad == 0:
                    identifier = str(this_img["group"])
                elif args.zero_pad == -1:
                    identifier = str(this_img["group"]).zfill(auto_zero_pad)
                else:
                    identifier = str(this_img["group"]).zfill(args.zero_pad)
            else:
                identifier = this_img["ID"]
            file_name_full = identifier + "-" + file_name + "-" + this_img["band"] + file_ext
    else:
        prefix = output_invalid
        file_name_full = file_name + file_ext
    new_path = os.path.join(prefix, file_name_full)
    shutil.copy(old_path, new_path)

if logfile:
    header = images[0].keys()
    with open(logfile, 'w', newline='') as logfile_handle:
        dict_writer = csv.DictWriter(logfile_handle, header)
        dict_writer.writeheader()
        dict_writer.writerows(images)

print("Done!")
```
```diff
@@ -51,6 +51,5 @@ commands.create_dem(args.point_cloud,
                     outdir=outdir,
                     resolution=args.resolution,
                     decimation=1,
-                    max_workers=multiprocessing.cpu_count(),
-                    keep_unfilled_copy=False
+                    max_workers=multiprocessing.cpu_count()
                     )
```
```diff
@@ -20,7 +20,7 @@ Please use the format below to report bugs and faults.
 
 [Type answer here]
 
-### How can we reproduce this? What steps did you do to trigger the problem? If this is an issue with processing a dataset, YOU MUST include a copy of your dataset uploaded on Google Drive or Dropbox (otherwise we cannot reproduce this).
+### How can we reproduce this? What steps did you do to trigger the problem? If this is an issue with processing a dataset, YOU MUST include a copy of your dataset AND task output log, uploaded on Google Drive or Dropbox (otherwise we cannot reproduce this).
 
 [Type answer here]
```
```diff
@@ -1,4 +1,4 @@
-FROM nvidia/cuda:11.2.0-devel-ubuntu20.04 AS builder
+FROM nvidia/cuda:11.2.2-devel-ubuntu20.04 AS builder
 
 # Env variables
 ENV DEBIAN_FRONTEND=noninteractive \
```
```diff
@@ -21,7 +21,7 @@ RUN bash configure.sh clean
 
 ### Use a second image for the final asset to reduce the number and
 # size of the layers.
-FROM nvidia/cuda:11.2.0-runtime-ubuntu20.04
+FROM nvidia/cuda:11.2.2-runtime-ubuntu20.04
 #FROM nvidia/cuda:11.2.0-devel-ubuntu20.04
 
 # Env variables
```
```diff
@@ -29,8 +29,12 @@ OutputBaseFilename=ODM_Setup_{#MyAppVersion}
 Compression=lzma
 SolidCompression=yes
 ArchitecturesAllowed=x64
 ArchitecturesInstallIn64BitMode=x64
+#ifndef SKIP_SIGN
 SignTool=signtool
+#endif
+PrivilegesRequired=lowest
+PrivilegesRequiredOverridesAllowed=commandline
 UsePreviousAppDir=no
 ;SetupIconFile=setup.ico
```
```diff
@@ -45,7 +49,7 @@ Source: "stages\*"; DestDir: "{app}\stages"; Excludes: "__pycache__"; Flags: ign
 Source: "SuperBuild\install\bin\*"; DestDir: "{app}\SuperBuild\install\bin"; Excludes: "__pycache__"; Flags: ignoreversion recursesubdirs createallsubdirs
 Source: "SuperBuild\install\lib\python3.8\*"; DestDir: "{app}\SuperBuild\install\lib\python3.8"; Excludes: "__pycache__"; Flags: ignoreversion recursesubdirs createallsubdirs
 Source: "venv\*"; DestDir: "{app}\venv"; Excludes: "__pycache__,pyvenv.cfg"; Flags: ignoreversion recursesubdirs createallsubdirs
-Source: "python38\*"; DestDir: "{app}\python38"; Excludes: "__pycache__"; Flags: ignoreversion recursesubdirs createallsubdirs
+Source: "python38\*"; DestDir: "{app}\venv\Scripts"; Excludes: "__pycache__"; Flags: ignoreversion recursesubdirs createallsubdirs
 Source: "console.bat"; DestDir: "{app}"; Flags: ignoreversion
 Source: "VERSION"; DestDir: "{app}"; Flags: ignoreversion
 Source: "LICENSE"; DestDir: "{app}"; Flags: ignoreversion
```
```diff
@@ -55,6 +59,10 @@ Source: "settings.yaml"; DestDir: "{app}"; Flags: ignoreversion
 Source: "win32env.bat"; DestDir: "{app}"; Flags: ignoreversion
 Source: "winrun.bat"; DestDir: "{app}"; Flags: ignoreversion
 Source: "SuperBuild\download\vc_redist.x64.exe"; DestDir: {tmp}; Flags: dontcopy
+Source: "winpostinstall.bat"; DestDir: "{app}"; Flags: ignoreversion
+
+[Dirs]
+Name: "{commonappdata}\ODM"; Permissions: users-modify
 
 [Icons]
 Name: {group}\ODM Console; Filename: "{app}\console.bat"; WorkingDir: "{app}"
```
```diff
@@ -65,21 +73,23 @@ Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{
 
 [Run]
 Filename: "{tmp}\vc_redist.x64.exe"; StatusMsg: "Installing Visual C++ Redistributable Packages for Visual Studio 2019"; Parameters: "/quiet"; Check: VC2019RedistNeedsInstall ; Flags: waituntilterminated
+Filename: "{app}\winpostinstall.bat"; StatusMsg: "Post Install"; Flags: waituntilterminated runhidden
 Filename: "{app}\console.bat"; Description: {cm:LaunchProgram,ODM Console}; Flags: nowait postinstall skipifsilent
 
 [Code]
 
 function VC2019RedistNeedsInstall: Boolean;
 var
   Version: String;
 begin
   if RegQueryStringValue(HKEY_LOCAL_MACHINE,
     'SOFTWARE\Microsoft\VisualStudio\14.0\VC\Runtimes\x64', 'Version', Version) then
   begin
     // Is the installed version at least 14.14 ?
     Log('VC Redist Version check : found ' + Version);
     Result := (CompareStr(Version, 'v14.14.26429.03')<0);
   end
   else
   begin
     // Not even an old version installed
     Result := True;
```
```diff
@@ -3,11 +3,16 @@ from opendm.net import download
 from opendm import log
 import zipfile
 import time
+import sys
 
 def get_model(namespace, url, version, name = "model.onnx"):
     version = version.replace(".", "_")
 
-    base_dir = os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")), "storage", "models")
+    base_dir = os.path.join(os.path.dirname(__file__), "..")
+    if sys.platform == 'win32':
+        base_dir = os.path.join(os.getenv('PROGRAMDATA'),"ODM")
+    base_dir = os.path.join(os.path.abspath(base_dir), "storage", "models")
 
     namespace_dir = os.path.join(base_dir, namespace)
     versioned_dir = os.path.join(namespace_dir, version)
```
@@ -0,0 +1,76 @@
from opendm import log
from shlex import _find_unsafe
import json
import os

def double_quote(s):
    """Return a shell-escaped version of the string *s*."""
    if not s:
        return '""'
    if _find_unsafe(s) is None:
        return s

    # use double quotes, and prefix double quotes with a \
    # the string $"b is then quoted as "$\"b"
    return '"' + s.replace('"', '\\\"') + '"'

def args_to_dict(args):
    args_dict = vars(args)
    result = {}
    for k in sorted(args_dict.keys()):
        # Skip _is_set keys
        if k.endswith("_is_set"):
            continue

        # Don't leak token
        if k == 'sm_cluster' and args_dict[k] is not None:
            result[k] = True
        else:
            result[k] = args_dict[k]

    return result

def save_opts(opts_json, args):
    try:
        with open(opts_json, "w", encoding='utf-8') as f:
            f.write(json.dumps(args_to_dict(args)))
    except Exception as e:
        log.ODM_WARNING("Cannot save options to %s: %s" % (opts_json, str(e)))

def compare_args(opts_json, args, rerun_stages):
    if not os.path.isfile(opts_json):
        return {}

    try:
        diff = {}

        with open(opts_json, "r", encoding="utf-8") as f:
            prev_args = json.loads(f.read())
        cur_args = args_to_dict(args)

        for opt in cur_args:
            cur_value = cur_args[opt]
            prev_value = prev_args.get(opt, None)
            stage = rerun_stages.get(opt, None)

            if stage is not None and cur_value != prev_value:
                diff[opt] = prev_value

        return diff
    except:
        return {}

def find_rerun_stage(opts_json, args, rerun_stages, processopts):
    # Find the proper rerun stage if one is not explicitly set
    if not ('rerun_is_set' in args or 'rerun_from_is_set' in args or 'rerun_all_is_set' in args):
        args_diff = compare_args(opts_json, args, rerun_stages)
        if args_diff:
            if 'split_is_set' in args:
                return processopts[processopts.index('dataset'):], args_diff

            try:
                stage_idxs = [processopts.index(rerun_stages[opt]) for opt in args_diff.keys() if rerun_stages[opt] is not None]
                return processopts[min(stage_idxs):], args_diff
            except ValueError as e:
                print(str(e))
    return None, {}
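This new arghelpers module appears to supersede opendm/loghelpers.py, which is deleted later in this comparison (the import in opendm/log.py is updated accordingly). A hedged sketch of how find_rerun_stage is meant to be driven; the option names and stage list here are illustrative, not the full ODM pipeline:

    import argparse

    # Illustrative: options whose change invalidates a given stage
    stages = {'dsm': 'odm_dem', 'mesh_size': 'odm_meshing'}
    pipeline = ['dataset', 'opensfm', 'odm_meshing', 'odm_dem']

    args = argparse.Namespace(dsm=True, mesh_size=200000)
    save_opts("opts.json", args)        # a first run records its options

    args.dsm = False                    # the user flips an option
    to_rerun, diff = find_rerun_stage("opts.json", args, stages, pipeline)
    # to_rerun == ['odm_dem']: rerun from the earliest affected stage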
@@ -25,6 +25,9 @@ def get_max_memory_mb(minimum = 100, use_at_most = 0.5):
    """
    return max(minimum, (virtual_memory().available / 1024 / 1024) * use_at_most)

def get_total_memory():
    return virtual_memory().total

def parallel_map(func, items, max_workers=1, single_thread_fallback=True):
    """
    Our own implementation for parallel processing
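get_max_memory_mb caps memory hints at a fraction of what is currently free, with a floor. A quick worked example with the available memory passed in explicitly (numbers illustrative):

    def max_memory_mb(available_bytes, minimum=100, use_at_most=0.5):
        # mirrors get_max_memory_mb, but takes the available memory as input
        return max(minimum, (available_bytes / 1024 / 1024) * use_at_most)

    assert max_memory_mb(8 * 1024**3) == 4096.0    # half of 8 GiB
    assert max_memory_mb(150 * 1024**2) == 100     # the 100 MB floor kicks in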
opendm/config.py (151 changed lines)
@@ -13,6 +13,100 @@ processopts = ['dataset', 'split', 'merge', 'opensfm', 'openmvs', 'odm_filterpoints',
               'odm_meshing', 'mvs_texturing', 'odm_georeferencing',
               'odm_dem', 'odm_orthophoto', 'odm_report', 'odm_postprocess']

rerun_stages = {
    '3d_tiles': 'odm_postprocess',
    'align': 'odm_georeferencing',
    'auto_boundary': 'odm_filterpoints',
    'auto_boundary_distance': 'odm_filterpoints',
    'bg_removal': 'dataset',
    'boundary': 'odm_filterpoints',
    'build_overviews': 'odm_orthophoto',
    'camera_lens': 'dataset',
    'cameras': 'dataset',
    'cog': 'odm_dem',
    'copy_to': 'odm_postprocess',
    'crop': 'odm_georeferencing',
    'dem_decimation': 'odm_dem',
    'dem_euclidean_map': 'odm_dem',
    'dem_gapfill_steps': 'odm_dem',
    'dem_resolution': 'odm_dem',
    'dsm': 'odm_dem',
    'dtm': 'odm_dem',
    'end_with': None,
    'fast_orthophoto': 'odm_filterpoints',
    'feature_quality': 'opensfm',
    'feature_type': 'opensfm',
    'force_gps': 'opensfm',
    'gcp': 'dataset',
    'geo': 'dataset',
    'gltf': 'mvs_texturing',
    'gps_accuracy': 'dataset',
    'help': None,
    'ignore_gsd': 'opensfm',
    'matcher_neighbors': 'opensfm',
    'matcher_order': 'opensfm',
    'matcher_type': 'opensfm',
    'max_concurrency': None,
    'merge': 'Merge',
    'mesh_octree_depth': 'odm_meshing',
    'mesh_size': 'odm_meshing',
    'min_num_features': 'opensfm',
    'name': None,
    'no_gpu': None,
    'optimize_disk_space': None,
    'orthophoto_compression': 'odm_orthophoto',
    'orthophoto_cutline': 'odm_orthophoto',
    'orthophoto_kmz': 'odm_orthophoto',
    'orthophoto_no_tiled': 'odm_orthophoto',
    'orthophoto_png': 'odm_orthophoto',
    'orthophoto_resolution': 'odm_orthophoto',
    'pc_classify': 'odm_georeferencing',
    'pc_copc': 'odm_georeferencing',
    'pc_csv': 'odm_georeferencing',
    'pc_ept': 'odm_georeferencing',
    'pc_filter': 'openmvs',
    'pc_las': 'odm_georeferencing',
    'pc_quality': 'opensfm',
    'pc_rectify': 'odm_georeferencing',
    'pc_sample': 'odm_filterpoints',
    'pc_skip_geometric': 'openmvs',
    'primary_band': 'dataset',
    'project_path': None,
    'radiometric_calibration': 'opensfm',
    'rerun': None,
    'rerun_all': None,
    'rerun_from': None,
    'rolling_shutter': 'opensfm',
    'rolling_shutter_readout': 'opensfm',
    'sfm_algorithm': 'opensfm',
    'sfm_no_partial': 'opensfm',
    'skip_3dmodel': 'odm_meshing',
    'skip_band_alignment': 'opensfm',
    'skip_orthophoto': 'odm_orthophoto',
    'skip_report': 'odm_report',
    'sky_removal': 'dataset',
    'sm_cluster': 'split',
    'sm_no_align': 'split',
    'smrf_scalar': 'odm_dem',
    'smrf_slope': 'odm_dem',
    'smrf_threshold': 'odm_dem',
    'smrf_window': 'odm_dem',
    'split': 'split',
    'split_image_groups': 'split',
    'split_overlap': 'split',
    'texturing_keep_unseen_faces': 'mvs_texturing',
    'texturing_single_material': 'mvs_texturing',
    'texturing_skip_global_seam_leveling': 'mvs_texturing',
    'tiles': 'odm_dem',
    'use_3dmesh': 'mvs_texturing',
    'use_exif': 'dataset',
    'use_fixed_camera_params': 'opensfm',
    'use_hybrid_bundle_adjustment': 'opensfm',
    'version': None,
    'video_limit': 'dataset',
    'video_resolution': 'dataset',
}

with open(os.path.join(context.root_path, 'VERSION')) as version_file:
    __version__ = version_file.read().strip()
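The dict feeds compare_args/find_rerun_stage from the new arghelpers module: each CLI option maps to the earliest pipeline stage its change invalidates, and None means "no rerun needed". A small illustration of the lookup (the changed options are hypothetical):

    changed = ['dsm', 'mesh_size']   # hypothetical options the user changed
    idxs = [processopts.index(rerun_stages[o]) for o in changed
            if rerun_stages.get(o) is not None]
    print(processopts[min(idxs):])
    # ['odm_meshing', 'mvs_texturing', 'odm_georeferencing',
    #  'odm_dem', 'odm_orthophoto', 'odm_report', 'odm_postprocess']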
@@ -123,8 +217,8 @@ def config(argv=None, parser=None):
    parser.add_argument('--feature-type',
                        metavar='<string>',
                        action=StoreValue,
                        default='sift',
                        choices=['akaze', 'hahog', 'orb', 'sift'],
                        default='dspsift',
                        choices=['akaze', 'dspsift', 'hahog', 'orb', 'sift'],
                        help=('Choose the algorithm for extracting keypoints and computing descriptors. '
                              'Can be one of: %(choices)s. Default: '
                              '%(default)s'))

@@ -153,6 +247,13 @@ def config(argv=None, parser=None):
                        default=0,
                        type=int,
                        help='Perform image matching with the nearest images based on GPS exif data. Set to 0 to match by triangulation. Default: %(default)s')

    parser.add_argument('--matcher-order',
                        metavar='<positive integer>',
                        action=StoreValue,
                        default=0,
                        type=int,
                        help='Perform image matching with the nearest N images based on image filename order. Can speed up processing of sequential images, such as those extracted from video. It is applied only on non-georeferenced datasets. Set to 0 to disable. Default: %(default)s')

    parser.add_argument('--use-fixed-camera-params',
                        action=StoreTrue,

@@ -175,7 +276,7 @@ def config(argv=None, parser=None):
                        metavar='<string>',
                        action=StoreValue,
                        default='auto',
                        choices=['auto', 'perspective', 'brown', 'fisheye', 'spherical', 'equirectangular', 'dual'],
                        choices=['auto', 'perspective', 'brown', 'fisheye', 'fisheye_opencv', 'spherical', 'equirectangular', 'dual'],
                        help=('Set a camera projection type. Manually setting a value '
                              'can help improve geometric undistortion. By default the application '
                              'tries to determine a lens type from the images metadata. Can be one of: %(choices)s. Default: '

@@ -219,6 +320,12 @@ def config(argv=None, parser=None):
                              'Can be one of: %(choices)s. Default: '
                              '%(default)s'))

    parser.add_argument('--sfm-no-partial',
                        action=StoreTrue,
                        nargs=0,
                        default=False,
                        help='Do not attempt to merge partial reconstructions. This can happen when images do not have sufficient overlap or are isolated. Default: %(default)s')

    parser.add_argument('--sky-removal',
                        action=StoreTrue,
                        nargs=0,

@@ -259,10 +366,11 @@ def config(argv=None, parser=None):
                        action=StoreTrue,
                        nargs=0,
                        default=False,
                        help='Ignore Ground Sampling Distance (GSD). GSD '
                             'caps the maximum resolution of image outputs and '
                             'resizes images when necessary, resulting in faster processing and '
                             'lower memory usage. Since GSD is an estimate, sometimes ignoring it can result in slightly better image output quality. Default: %(default)s')
                        help='Ignore Ground Sampling Distance (GSD).'
                             'A memory and processor hungry change relative to the default behavior if set to true. '
                             'Ordinarily, GSD estimates are used to cap the maximum resolution of image outputs and resizes images when necessary, resulting in faster processing and lower memory usage. '
                             'Since GSD is an estimate, sometimes ignoring it can result in slightly better image output quality. '
                             'Never set --ignore-gsd to true unless you are positive you need it, and even then: do not use it. Default: %(default)s')

    parser.add_argument('--no-gpu',
                        action=StoreTrue,

@@ -344,7 +452,7 @@ def config(argv=None, parser=None):
                        action=StoreTrue,
                        nargs=0,
                        default=False,
                        help='Classify the point cloud outputs using a Simple Morphological Filter. '
                        help='Classify the point cloud outputs. '
                             'You can control the behavior of this option by tweaking the --dem-* parameters. '
                             'Default: '
                             '%(default)s')

@@ -377,7 +485,7 @@ def config(argv=None, parser=None):
                        metavar='<positive float>',
                        action=StoreValue,
                        type=float,
                        default=2.5,
                        default=5,
                        help='Filters the point cloud by removing points that deviate more than N standard deviations from the local mean. Set to 0 to disable filtering. '
                             'Default: %(default)s')

@@ -396,13 +504,6 @@ def config(argv=None, parser=None):
                        help='Geometric estimates improve the accuracy of the point cloud by computing geometrically consistent depthmaps but may not be usable in larger datasets. This flag disables geometric estimates. '
                             'Default: %(default)s')

    parser.add_argument('--pc-tile',
                        action=StoreTrue,
                        nargs=0,
                        default=False,
                        help='Reduce the memory usage needed for depthmap fusion by splitting large scenes into tiles. Turn this on if your machine doesn\'t have much RAM and/or you\'ve set --pc-quality to high or ultra. Experimental. '
                             'Default: %(default)s')

    parser.add_argument('--smrf-scalar',
                        metavar='<positive float>',
                        action=StoreValue,

@@ -441,12 +542,6 @@ def config(argv=None, parser=None):
                        default=False,
                        help=('Skip normalization of colors across all images. Useful when processing radiometric data. Default: %(default)s'))

    parser.add_argument('--texturing-skip-local-seam-leveling',
                        action=StoreTrue,
                        nargs=0,
                        default=False,
                        help='Skip the blending of colors near seams. Default: %(default)s')

    parser.add_argument('--texturing-keep-unseen-faces',
                        action=StoreTrue,
                        nargs=0,

@@ -485,11 +580,11 @@ def config(argv=None, parser=None):
                        action=StoreValue,
                        default=None,
                        help=('Path to the image geolocation file containing the camera center coordinates used for georeferencing. '
                              'If you don''t have values for omega/phi/kappa you can set them to 0. '
                              'If you don\'t have values for yaw/pitch/roll you can set them to 0. '
                              'The file needs to '
                              'use the following format: \n'
                              'EPSG:<code> or <+proj definition>\n'
                              'image_name geo_x geo_y geo_z [omega (degrees)] [phi (degrees)] [kappa (degrees)] [horz accuracy (meters)] [vert accuracy (meters)]\n'
                              'image_name geo_x geo_y geo_z [yaw (degrees)] [pitch (degrees)] [roll (degrees)] [horz accuracy (meters)] [vert accuracy (meters)]\n'
                              'Default: %(default)s'))

    parser.add_argument('--align',

@@ -537,7 +632,7 @@ def config(argv=None, parser=None):
                        action=StoreValue,
                        type=float,
                        default=5,
                        help='DSM/DTM resolution in cm / pixel. Note that this value is capped to 2x the ground sampling distance (GSD) estimate. To remove the cap, check --ignore-gsd also.'
                        help='DSM/DTM resolution in cm / pixel. Note that this value is capped by a ground sampling distance (GSD) estimate.'
                             ' Default: %(default)s')

    parser.add_argument('--dem-decimation',

@@ -564,7 +659,7 @@ def config(argv=None, parser=None):
                        action=StoreValue,
                        default=5,
                        type=float,
                        help=('Orthophoto resolution in cm / pixel. Note that this value is capped by a ground sampling distance (GSD) estimate. To remove the cap, check --ignore-gsd also. '
                        help=('Orthophoto resolution in cm / pixel. Note that this value is capped by a ground sampling distance (GSD) estimate.'
                              'Default: %(default)s'))

    parser.add_argument('--orthophoto-no-tiled',

@@ -743,7 +838,7 @@ def config(argv=None, parser=None):
                        type=float,
                        action=StoreValue,
                        metavar='<positive float>',
                        default=10,
                        default=3,
                        help='Set a value in meters for the GPS Dilution of Precision (DOP) '
                             'information for all images. If your images are tagged '
                             'with high precision GPS information (RTK), this value will be automatically '

@@ -785,7 +880,7 @@ def config(argv=None, parser=None):
                              'Default: %(default)s'))

    args, unknown = parser.parse_known_args(argv)
    DEPRECATED = ["--verbose", "--debug", "--time", "--resize-to", "--depthmap-resolution", "--pc-geometric", "--texturing-data-term", "--texturing-outlier-removal-type", "--texturing-tone-mapping"]
    DEPRECATED = ["--verbose", "--debug", "--time", "--resize-to", "--depthmap-resolution", "--pc-geometric", "--texturing-data-term", "--texturing-outlier-removal-type", "--texturing-tone-mapping", "--texturing-skip-local-seam-leveling"]
    unknown_e = [p for p in unknown if p not in DEPRECATED]
    if len(unknown_e) > 0:
        raise parser.error("unrecognized arguments: %s" % " ".join(unknown_e))
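The DEPRECATED list lets removed flags pass through parse_known_args without aborting; only genuinely unknown flags raise an error. A small sketch of that filter (the flag values are illustrative):

    DEPRECATED = ["--verbose", "--debug", "--time"]   # abridged from the diff
    unknown = ["--verbose", "--not-a-flag"]           # as returned by parse_known_args
    unknown_e = [p for p in unknown if p not in DEPRECATED]
    assert unknown_e == ["--not-a-flag"]              # only this triggers parser.error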
@@ -5,22 +5,17 @@ import numpy
import math
import time
import shutil
import functools
import glob
import re
from joblib import delayed, Parallel
from opendm.system import run
from opendm import point_cloud
from opendm import io
from opendm import system
from opendm.concurrency import get_max_memory, parallel_map
from scipy import ndimage
from opendm.concurrency import get_max_memory, parallel_map, get_total_memory
from datetime import datetime
from opendm.vendor.gdal_fillnodata import main as gdal_fillnodata
from opendm import log
try:
    import Queue as queue
except:
    import queue
import threading

from .ground_rectification.rectify import run_rectification
from . import pdal

@@ -46,16 +41,18 @@ def classify(lasFile, scalar, slope, threshold, window):
    log.ODM_INFO('Created %s in %s' % (lasFile, datetime.now() - start))
    return lasFile

def rectify(lasFile, debug=False, reclassify_threshold=5, min_area=750, min_points=500):
def rectify(lasFile, reclassify_threshold=5, min_area=750, min_points=500):
    start = datetime.now()

    try:

        log.ODM_INFO("Rectifying {} using with [reclassify threshold: {}, min area: {}, min points: {}]".format(lasFile, reclassify_threshold, min_area, min_points))
        run_rectification(
            input=lasFile, output=lasFile, debug=debug, \
            input=lasFile, output=lasFile, \
            reclassify_plan='median', reclassify_threshold=reclassify_threshold, \
            extend_plan='surrounding', extend_grid_distance=5, \
            min_area=min_area, min_points=min_points)

        log.ODM_INFO('Created %s in %s' % (lasFile, datetime.now() - start))
    except Exception as e:
        log.ODM_WARNING("Error rectifying ground in file %s: %s" % (lasFile, str(e)))
@@ -66,114 +63,51 @@ error = None

def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56'], gapfill=True,
               outdir='', resolution=0.1, max_workers=1, max_tile_size=4096,
               decimation=None, keep_unfilled_copy=False,
               apply_smoothing=True):
               decimation=None, with_euclidean_map=False,
               apply_smoothing=True, max_tiles=None):
    """ Create DEM from multiple radii, and optionally gapfill """

    global error
    error = None

    start = datetime.now()

    if not os.path.exists(outdir):
        log.ODM_INFO("Creating %s" % outdir)
        os.mkdir(outdir)

    extent = point_cloud.get_extent(input_point_cloud)
    log.ODM_INFO("Point cloud bounds are [minx: %s, maxx: %s] [miny: %s, maxy: %s]" % (extent['minx'], extent['maxx'], extent['miny'], extent['maxy']))
    ext_width = extent['maxx'] - extent['minx']
    ext_height = extent['maxy'] - extent['miny']

    w, h = (int(math.ceil(ext_width / float(resolution))),
            int(math.ceil(ext_height / float(resolution))))

    # Set a floor, no matter the resolution parameter
    # (sometimes a wrongly estimated scale of the model can cause the resolution
    # to be set unrealistically low, causing errors)
    RES_FLOOR = 64
    if w < RES_FLOOR and h < RES_FLOOR:
        prev_w, prev_h = w, h

        if w >= h:
            w, h = (RES_FLOOR, int(math.ceil(ext_height / ext_width * RES_FLOOR)))
        else:
            w, h = (int(math.ceil(ext_width / ext_height * RES_FLOOR)), RES_FLOOR)

        floor_ratio = prev_w / float(w)
        resolution *= floor_ratio
        radiuses = [str(float(r) * floor_ratio) for r in radiuses]

        log.ODM_WARNING("Really low resolution DEM requested %s will set floor at %s pixels. Resolution changed to %s. The scale of this reconstruction might be off." % ((prev_w, prev_h), RES_FLOOR, resolution))

    final_dem_pixels = w * h

    num_splits = int(max(1, math.ceil(math.log(math.ceil(final_dem_pixels / float(max_tile_size * max_tile_size)))/math.log(2))))
    num_tiles = num_splits * num_splits
    log.ODM_INFO("DEM resolution is %s, max tile size is %s, will split DEM generation into %s tiles" % ((h, w), max_tile_size, num_tiles))

    tile_bounds_width = ext_width / float(num_splits)
    tile_bounds_height = ext_height / float(num_splits)

    tiles = []

    for r in radiuses:
        minx = extent['minx']

        for x in range(num_splits):
            miny = extent['miny']
            if x == num_splits - 1:
                maxx = extent['maxx']
            else:
                maxx = minx + tile_bounds_width

            for y in range(num_splits):
                if y == num_splits - 1:
                    maxy = extent['maxy']
                else:
                    maxy = miny + tile_bounds_height

                filename = os.path.join(os.path.abspath(outdir), '%s_r%s_x%s_y%s.tif' % (dem_type, r, x, y))

                tiles.append({
                    'radius': r,
                    'bounds': {
                        'minx': minx,
                        'maxx': maxx,
                        'miny': miny,
                        'maxy': maxy
                    },
                    'filename': filename
                })

                miny = maxy
            minx = maxx

    # Sort tiles by increasing radius
    tiles.sort(key=lambda t: float(t['radius']), reverse=True)

    def process_tile(q):
        log.ODM_INFO("Generating %s (%s, radius: %s, resolution: %s)" % (q['filename'], output_type, q['radius'], resolution))

        d = pdal.json_gdal_base(q['filename'], output_type, q['radius'], resolution, q['bounds'])

        if dem_type == 'dtm':
            d = pdal.json_add_classification_filter(d, 2)

        if decimation is not None:
            d = pdal.json_add_decimation_filter(d, decimation)

        pdal.json_add_readers(d, [input_point_cloud])
        pdal.run_pipeline(d)

    parallel_map(process_tile, tiles, max_workers)
    kwargs = {
        'input': input_point_cloud,
        'outdir': outdir,
        'outputType': output_type,
        'radiuses': ",".join(map(str, radiuses)),
        'resolution': resolution,
        'maxTiles': 0 if max_tiles is None else max_tiles,
        'decimation': 1 if decimation is None else decimation,
        'classification': 2 if dem_type == 'dtm' else -1,
        'tileSize': max_tile_size
    }
    system.run('renderdem "{input}" '
               '--outdir "{outdir}" '
               '--output-type {outputType} '
               '--radiuses {radiuses} '
               '--resolution {resolution} '
               '--max-tiles {maxTiles} '
               '--decimation {decimation} '
               '--classification {classification} '
               '--tile-size {tileSize} '
               '--force '.format(**kwargs), env_vars={'OMP_NUM_THREADS': max_workers})

    output_file = "%s.tif" % dem_type
    output_path = os.path.abspath(os.path.join(outdir, output_file))

    # Verify tile results
    for t in tiles:
        if not os.path.exists(t['filename']):
            raise Exception("Error creating %s, %s failed to be created" % (output_file, t['filename']))
    # Fetch tiles
    tiles = []
    for p in glob.glob(os.path.join(os.path.abspath(outdir), "*.tif")):
        filename = os.path.basename(p)
        m = re.match("^r([\d\.]+)_x\d+_y\d+\.tif", filename)
        if m is not None:
            tiles.append({'filename': p, 'radius': float(m.group(1))})

    if len(tiles) == 0:
        raise system.ExitException("No DEM tiles were generated, something went wrong")

    log.ODM_INFO("Generated %s tiles" % len(tiles))

    # Sort tiles by decreasing radius
    tiles.sort(key=lambda t: float(t['radius']), reverse=True)

    # Create virtual raster
    tiles_vrt_path = os.path.abspath(os.path.join(outdir, "tiles.vrt"))
@@ -185,7 +119,6 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
    run('gdalbuildvrt -input_file_list "%s" "%s" ' % (tiles_file_list, tiles_vrt_path))

    merged_vrt_path = os.path.abspath(os.path.join(outdir, "merged.vrt"))
    geotiff_tmp_path = os.path.abspath(os.path.join(outdir, 'tiles.tmp.tif'))
    geotiff_small_path = os.path.abspath(os.path.join(outdir, 'tiles.small.tif'))
    geotiff_small_filled_path = os.path.abspath(os.path.join(outdir, 'tiles.small_filled.tif'))
    geotiff_path = os.path.abspath(os.path.join(outdir, 'tiles.tif'))

@@ -197,7 +130,6 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
        'tiles_vrt': tiles_vrt_path,
        'merged_vrt': merged_vrt_path,
        'geotiff': geotiff_path,
        'geotiff_tmp': geotiff_tmp_path,
        'geotiff_small': geotiff_small_path,
        'geotiff_small_filled': geotiff_small_filled_path
    }
@@ -206,31 +138,27 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
        # Sometimes, for some reason gdal_fillnodata.py
        # behaves strangely when reading data directly from a .VRT
        # so we need to convert to GeoTIFF first.
        # Scale to 10% size
        run('gdal_translate '
            '-co NUM_THREADS={threads} '
            '-co BIGTIFF=IF_SAFER '
            '-co COMPRESS=DEFLATE '
            '--config GDAL_CACHEMAX {max_memory}% '
            '"{tiles_vrt}" "{geotiff_tmp}"'.format(**kwargs))

        # Scale to 10% size
        run('gdal_translate '
            '-co NUM_THREADS={threads} '
            '-co BIGTIFF=IF_SAFER '
            '--config GDAL_CACHEMAX {max_memory}% '
            '-outsize 10% 0 '
            '"{geotiff_tmp}" "{geotiff_small}"'.format(**kwargs))
            '-outsize 10% 0 '
            '"{tiles_vrt}" "{geotiff_small}"'.format(**kwargs))

        # Fill scaled
        gdal_fillnodata(['.',
                         '-co', 'NUM_THREADS=%s' % kwargs['threads'],
                         '-co', 'BIGTIFF=IF_SAFER',
                         '-co', 'COMPRESS=DEFLATE',
                         '--config', 'GDAL_CACHE_MAX', str(kwargs['max_memory']) + '%',
                         '-b', '1',
                         '-of', 'GTiff',
                         kwargs['geotiff_small'], kwargs['geotiff_small_filled']])

        # Merge filled scaled DEM with unfilled DEM using bilinear interpolation
        run('gdalbuildvrt -resolution highest -r bilinear "%s" "%s" "%s"' % (merged_vrt_path, geotiff_small_filled_path, geotiff_tmp_path))
        run('gdalbuildvrt -resolution highest -r bilinear "%s" "%s" "%s"' % (merged_vrt_path, geotiff_small_filled_path, tiles_vrt_path))
        run('gdal_translate '
            '-co NUM_THREADS={threads} '
            '-co TILED=YES '
@@ -253,14 +181,14 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
    else:
        os.replace(geotiff_path, output_path)

    if os.path.exists(geotiff_tmp_path):
        if not keep_unfilled_copy:
            os.remove(geotiff_tmp_path)
        else:
            os.replace(geotiff_tmp_path, io.related_file_path(output_path, postfix=".unfilled"))
    if os.path.exists(tiles_vrt_path):
        if with_euclidean_map:
            emap_path = io.related_file_path(output_path, postfix=".euclideand")
            compute_euclidean_map(tiles_vrt_path, emap_path, overwrite=True)

    for cleanup_file in [tiles_vrt_path, tiles_file_list, merged_vrt_path, geotiff_small_path, geotiff_small_filled_path]:
        if os.path.exists(cleanup_file): os.remove(cleanup_file)

    for t in tiles:
        if os.path.exists(t['filename']): os.remove(t['filename'])
@@ -276,12 +204,20 @@ def compute_euclidean_map(geotiff_path, output_path, overwrite=False):
    with rasterio.open(geotiff_path) as f:
        nodata = f.nodatavals[0]

    if not os.path.exists(output_path) or overwrite:
    if not os.path.isfile(output_path) or overwrite:
        if os.path.isfile(output_path):
            os.remove(output_path)

        log.ODM_INFO("Computing euclidean distance: %s" % output_path)

        if gdal_proximity is not None:
            try:
                gdal_proximity(['gdal_proximity.py', geotiff_path, output_path, '-values', str(nodata)])
                gdal_proximity(['gdal_proximity.py',
                                geotiff_path, output_path, '-values', str(nodata),
                                '-co', 'TILED=YES',
                                '-co', 'BIGTIFF=IF_SAFER',
                                '-co', 'COMPRESS=DEFLATE',
                               ])
            except Exception as e:
                log.ODM_WARNING("Cannot compute euclidean distance: %s" % str(e))
@@ -297,63 +233,34 @@ def compute_euclidean_map(geotiff_path, output_path, overwrite=False):
    return output_path


def median_smoothing(geotiff_path, output_path, smoothing_iterations=1, window_size=512, num_workers=1):
def median_smoothing(geotiff_path, output_path, window_size=512, num_workers=1, radius=4):
    """ Apply median smoothing """
    start = datetime.now()

    if not os.path.exists(geotiff_path):
        raise Exception('File %s does not exist!' % geotiff_path)

    log.ODM_INFO('Starting smoothing...')

    with rasterio.open(geotiff_path) as img:
        nodata = img.nodatavals[0]
        dtype = img.dtypes[0]
        shape = img.shape
        arr = img.read()[0]
        for i in range(smoothing_iterations):
            log.ODM_INFO("Smoothing iteration %s" % str(i + 1))
            rows, cols = numpy.meshgrid(numpy.arange(0, shape[0], window_size), numpy.arange(0, shape[1], window_size))
            rows = rows.flatten()
            cols = cols.flatten()
            rows_end = numpy.minimum(rows + window_size, shape[0])
            cols_end = numpy.minimum(cols + window_size, shape[1])
            windows = numpy.dstack((rows, cols, rows_end, cols_end)).reshape(-1, 4)

            filter = functools.partial(ndimage.median_filter, size=9, output=dtype, mode='nearest')

            # threading backend and GIL released filter are important for memory efficiency and multi-core performance
            window_arrays = Parallel(n_jobs=num_workers, backend='threading')(delayed(window_filter_2d)(arr, nodata, window, 9, filter) for window in windows)

            for window, win_arr in zip(windows, window_arrays):
                arr[window[0]:window[2], window[1]:window[3]] = win_arr
        log.ODM_INFO("Smoothing completed in %s" % str(datetime.now() - start))
        # write output
        with rasterio.open(output_path, 'w', BIGTIFF="IF_SAFER", **img.profile) as imgout:
            imgout.write(arr, 1)
    kwargs = {
        'input': geotiff_path,
        'output': output_path,
        'window': window_size,
        'radius': radius,
    }
    system.run('fastrasterfilter "{input}" '
               '--output "{output}" '
               '--window-size {window} '
               '--radius {radius} '
               '--co TILED=YES '
               '--co BIGTIFF=IF_SAFER '
               '--co COMPRESS=DEFLATE '.format(**kwargs), env_vars={'OMP_NUM_THREADS': num_workers})

    log.ODM_INFO('Completed smoothing to create %s in %s' % (output_path, datetime.now() - start))
    return output_path


def window_filter_2d(arr, nodata, window, kernel_size, filter):
    """
    Apply a filter to dem within a window, expects to work with kernal based filters

    :param geotiff_path: path to the geotiff to filter
    :param window: the window to apply the filter, should be a list contains row start, col_start, row_end, col_end
    :param kernel_size: the size of the kernel for the filter, works with odd numbers, need to test if it works with even numbers
    :param filter: the filter function which takes a 2d array as input and filter results as output.
    """
    shape = arr.shape[:2]
    if window[0] < 0 or window[1] < 0 or window[2] > shape[0] or window[3] > shape[1]:
        raise Exception('Window is out of bounds')
    expanded_window = [ max(0, window[0] - kernel_size // 2), max(0, window[1] - kernel_size // 2), min(shape[0], window[2] + kernel_size // 2), min(shape[1], window[3] + kernel_size // 2) ]
    win_arr = arr[expanded_window[0]:expanded_window[2], expanded_window[1]:expanded_window[3]]
    # Should have a better way to handle nodata, similar to the way the filter algorithms handle the border (reflection, nearest, interpolation, etc).
    # For now will follow the old approach to guarantee identical outputs
    nodata_locs = win_arr == nodata
    win_arr = filter(win_arr)
    win_arr[nodata_locs] = nodata
    win_arr = win_arr[window[0] - expanded_window[0] : window[2] - expanded_window[0], window[1] - expanded_window[1] : window[3] - expanded_window[1]]
    return win_arr

def get_dem_radius_steps(stats_file, steps, resolution, multiplier = 1.0):
    radius_steps = [point_cloud.get_spacing(stats_file, resolution) * multiplier]
    for _ in range(steps - 1):
        radius_steps.append(radius_steps[-1] * math.sqrt(2))

    return radius_steps
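get_dem_radius_steps grows the search radius by sqrt(2) per step, so each step doubles the area of the search disk (pi * r^2). A quick worked example with an illustrative point spacing:

    import math

    spacing = 0.1                      # hypothetical point spacing in meters
    steps = [spacing]
    for _ in range(2):
        steps.append(steps[-1] * math.sqrt(2))
    print(steps)                       # [0.1, ~0.141, ~0.2]: area doubles each step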
@@ -0,0 +1,25 @@
import numpy as np
from .dimension import Dimension

class UserDataDimension(Dimension):
    """A dimension that stores the user data of a point cloud."""

    def __init__(self):
        super(UserDataDimension, self).__init__()

    def assign_default(self, point_cloud):
        default = np.full(point_cloud.len(), 0, dtype=np.uint8)
        super(UserDataDimension, self)._set_values(point_cloud, default)

    def assign(self, *point_clouds, **kwargs):
        # Simply copy the value of the UserData dimension from the original point cloud
        # to the new point cloud
        for point_cloud in point_clouds:
            super(UserDataDimension, self)._set_values(point_cloud, point_cloud.user_data)

    def get_name(self):
        return 'UserData'

    def get_las_type(self):
        return 'uint8'
@@ -1,47 +1,122 @@
# TODO: Move to pylas when project migrates to python3

import laspy
import time
from opendm.dem.ground_rectification.extra_dimensions.userdata_dimension import UserDataDimension
import pdal
import numpy as np
from opendm import log
from ..point_cloud import PointCloud
import pdb
import json

def read_cloud(point_cloud_path):
    # Open point cloud and read its properties
    las_file = laspy.read(point_cloud_path)
    header = las_file.header
    pipeline = pdal.Pipeline('[{"type":"readers.las","filename":"%s"}]' % point_cloud_path)
    pipeline.execute()

    x = las_file.x.scaled_array()
    y = las_file.y.scaled_array()
    z = las_file.z.scaled_array()
    arrays = pipeline.arrays[0]

    cloud = PointCloud.with_dimensions(x, y, z, las_file.classification.array, las_file.red, las_file.green, las_file.blue)
    # Extract point coordinates, classification, and RGB values
    x = arrays["X"]
    y = arrays["Y"]
    z = arrays["Z"]
    classification = arrays["Classification"].astype(np.uint8)
    red = arrays["Red"]
    green = arrays["Green"]
    blue = arrays["Blue"]

    # Return the result
    return header, cloud
    cloud = PointCloud.with_dimensions(x, y, z, classification, red, green, blue)

def write_cloud(header, point_cloud, output_point_cloud_path, write_extra_dimensions=False):
    # Open output file
    output_las_file = laspy.LasData(header)
    if "UserData" in arrays.dtype.names:
        cloud.add_dimension(UserDataDimension(), arrays["UserData"])

    if write_extra_dimensions:
        extra_dims = [laspy.ExtraBytesParams(name=name, type=dimension.get_las_type(), description="Dimension added by Ground Extend") for name, dimension in point_cloud.extra_dimensions_metadata.items()]
        output_las_file.add_extra_dims(extra_dims)
        # Assign dimension values
        for dimension_name, values in point_cloud.extra_dimensions.items():
            setattr(output_las_file, dimension_name, values)
    return pipeline.metadata["metadata"]["readers.las"], cloud


def safe_add_metadata(pipeline, metadata, key, sourcekey=None):
    k = key if sourcekey is None else sourcekey
    if k in metadata:
        pipeline["pipeline"][0][key] = metadata[k]


def write_cloud(metadata, point_cloud, output_point_cloud_path):

    # Adapt points to scale and offset
    [x, y] = np.hsplit(point_cloud.xy, 2)
    output_las_file.x = x.ravel()
    output_las_file.y = y.ravel()
    output_las_file.z = point_cloud.z
    x, y = np.hsplit(point_cloud.xy, 2)

    # Set color
    [red, green, blue] = np.hsplit(point_cloud.rgb, 3)
    output_las_file.red = red.ravel()
    output_las_file.green = green.ravel()
    output_las_file.blue = blue.ravel()
    red, green, blue = np.hsplit(point_cloud.rgb, 3)

    # Set classification
    output_las_file.classification = point_cloud.classification.astype(np.uint8)
    arrays = np.zeros(len(x),
                      dtype=[('X', '<f8'),
                             ('Y', '<f8'),
                             ('Z', '<f8'),
                             ('Intensity', '<u2'),
                             ('ReturnNumber', 'u1'),
                             ('NumberOfReturns', 'u1'),
                             ('ScanDirectionFlag', 'u1'),
                             ('EdgeOfFlightLine', 'u1'),
                             ('Classification', 'u1'),
                             ('ScanAngleRank', '<f4'),
                             ('UserData', 'u1'),
                             ('PointSourceId', '<u2'),
                             ('GpsTime', '<f8'),
                             ('Red', '<u2'),
                             ('Green', '<u2'),
                             ('Blue', '<u2')])
    arrays['X'] = x.ravel()
    arrays['Y'] = y.ravel()
    arrays['Z'] = point_cloud.z
    arrays['Classification'] = point_cloud.classification.astype(np.uint8).ravel()
    arrays['Red'] = red.astype(np.uint8).ravel()
    arrays['Green'] = green.astype(np.uint8).ravel()
    arrays['Blue'] = blue.astype(np.uint8).ravel()

    output_las_file.write(output_point_cloud_path)
    if "UserData" in point_cloud.extra_dimensions:
        arrays['UserData'] = point_cloud.extra_dimensions["UserData"].ravel()

    writer_pipeline = {
        "pipeline": [
            {
                "type": "writers.las",
                "filename": output_point_cloud_path,
                "compression": "lazperf",
                "extra_dims": "all"
            }
        ]
    }

    safe_add_metadata(writer_pipeline, metadata, "scale_x")
    safe_add_metadata(writer_pipeline, metadata, "scale_y")
    safe_add_metadata(writer_pipeline, metadata, "scale_z")
    safe_add_metadata(writer_pipeline, metadata, "offset_x")
    safe_add_metadata(writer_pipeline, metadata, "offset_y")
    safe_add_metadata(writer_pipeline, metadata, "offset_z")
    safe_add_metadata(writer_pipeline, metadata, "a_srs", "spatialreference")
    safe_add_metadata(writer_pipeline, metadata, "dataformat_id")
    safe_add_metadata(writer_pipeline, metadata, "system_id")
    safe_add_metadata(writer_pipeline, metadata, "software_id")
    safe_add_metadata(writer_pipeline, metadata, "creation_doy")
    safe_add_metadata(writer_pipeline, metadata, "creation_year")
    safe_add_metadata(writer_pipeline, metadata, "minor_version")
    safe_add_metadata(writer_pipeline, metadata, "major_version")
    safe_add_metadata(writer_pipeline, metadata, "file_source_id")
    safe_add_metadata(writer_pipeline, metadata, "global_encoding")

    # The metadata object contains the VLRs as fields called "vlr_N" where N is the index of the VLR
    # We have to copy them over to the writer pipeline as a list of dictionaries in the "vlrs" field
    writer_pipeline["pipeline"][0]["vlrs"] = []

    i = 0
    while True:
        vlr_field = "vlr_%d" % i
        if vlr_field in metadata:
            vlr = metadata[vlr_field]
            writer_pipeline["pipeline"][0]["vlrs"].append({
                "record_id": vlr["record_id"],
                "user_id": vlr["user_id"],
                "description": vlr["description"],
                "data": vlr["data"]
            })
            i += 1
        else:
            break

    pipeline = pdal.Pipeline(json.dumps(writer_pipeline), arrays=[arrays])
    pipeline.execute()
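The module now round-trips LAS data through PDAL's Python bindings rather than laspy. A minimal, hedged sketch of the same read pattern (the file path is illustrative):

    import pdal

    # Read a LAS/LAZ file into a structured numpy array
    p = pdal.Pipeline('[{"type":"readers.las","filename":"ground.las"}]')  # illustrative path
    p.execute()
    points = p.arrays[0]    # fields such as X, Y, Z, Classification, Red, ...
    # Reader metadata carries the header fields the writer pipeline copies over
    srs = p.metadata["metadata"]["readers.las"]["spatialreference"]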
@@ -23,7 +23,7 @@ def run_rectification(**kwargs):
    if 'extend_plan' in kwargs and kwargs['extend_plan'] is not None:
        point_cloud = extend_cloud(point_cloud, kwargs['extend_plan'], kwargs['extend_grid_distance'], kwargs['min_points'], kwargs['min_area'])

    write_cloud(header, point_cloud, kwargs['output'], kwargs['debug'])
    write_cloud(header, point_cloud, kwargs['output'])

def reclassify_cloud(point_cloud, plan, threshold, min_points, min_area):
    # Get only ground
@@ -168,6 +168,7 @@ def run_pdaltranslate_smrf(fin, fout, scalar, slope, threshold, window):
    system.run(' '.join(cmd))


def merge_point_clouds(input_files, output_file):
    if len(input_files) == 0:
        log.ODM_WARNING("Cannot merge point clouds, no point clouds to merge.")

@@ -181,3 +182,13 @@ def merge_point_clouds(input_files, output_file):
    system.run(' '.join(cmd))


def translate(input, output):
    cmd = [
        'pdal',
        'translate',
        '-i "%s"' % input,
        '-o "%s"' % output,
    ]

    system.run(' '.join(cmd))
@@ -62,17 +62,48 @@ def build_untwine(input_point_cloud_files, tmpdir, output_path, max_concurrency=
    # Run untwine
    system.run('untwine --temp_dir "{tmpdir}" {files} --output_dir "{outputdir}"'.format(**kwargs))

def build_copc(input_point_cloud_files, output_file):
def build_copc(input_point_cloud_files, output_file, convert_rgb_8_to_16=False):
    if len(input_point_cloud_files) == 0:
        logger.ODM_WARNING("Cannot build COPC, no input files")
        return

    base_path, ext = os.path.splitext(output_file)

    tmpdir = io.related_file_path(base_path, postfix="-tmp")
    if os.path.exists(tmpdir):
        log.ODM_WARNING("Removing previous directory %s" % tmpdir)
        shutil.rmtree(tmpdir)
    cleanup = [tmpdir]

    if convert_rgb_8_to_16:
        tmpdir16 = io.related_file_path(base_path, postfix="-tmp16")
        if os.path.exists(tmpdir16):
            log.ODM_WARNING("Removing previous directory %s" % tmpdir16)
            shutil.rmtree(tmpdir16)
        os.makedirs(tmpdir16, exist_ok=True)
        cleanup.append(tmpdir16)

        converted = []
        ok = True
        for f in input_point_cloud_files:
            # Convert 8bit RGB to 16bit RGB (per COPC spec)
            base = os.path.basename(f)
            filename, ext = os.path.splitext(base)
            out_16 = os.path.join(tmpdir16, "%s_16%s" % (filename, ext))
            try:
                system.run('pdal translate -i "{input}" -o "{output}" assign '
                           '--filters.assign.value="Red = Red / 255 * 65535" '
                           '--filters.assign.value="Green = Green / 255 * 65535" '
                           '--filters.assign.value="Blue = Blue / 255 * 65535" '.format(input=f, output=out_16))

                converted.append(out_16)
            except Exception as e:
                log.ODM_WARNING("Cannot convert point cloud to 16bit RGB, COPC is not going to follow the official spec: %s" % str(e))
                ok = False
                break
        if ok:
            input_point_cloud_files = converted

    kwargs = {
        'tmpdir': tmpdir,
        'files': "--files " + " ".join(map(double_quote, input_point_cloud_files)),

@@ -82,5 +113,6 @@ def build_copc(input_point_cloud_files, output_file):
    # Run untwine
    system.run('untwine --temp_dir "{tmpdir}" {files} -o "{output}" --single_file'.format(**kwargs))

    if os.path.exists(tmpdir):
        shutil.rmtree(tmpdir)
    for d in cleanup:
        if os.path.exists(d):
            shutil.rmtree(d)
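The 8-to-16 bit rescale maps the 8-bit color range onto the full 16-bit range, as the COPC spec expects. A quick check of the arithmetic (values illustrative):

    # 65535 / 255 == 257 exactly, so "Red / 255 * 65535" is just v * 257,
    # which replicates the 8-bit byte into both bytes of the 16-bit value.
    assert 255 / 255 * 65535 == 65535
    assert round(128 / 255 * 65535) == 128 * 257   # == 32896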
@@ -0,0 +1,94 @@
import json
import os
import tempfile
import base64
from rasterio.io import MemoryFile
from opendm.system import run
from opendm import log
from opendm.utils import double_quote

def extract_raw_thermal_image_data(image_path):
    try:
        f, tmp_file_path = tempfile.mkstemp(suffix='.json')
        os.close(f)

        try:
            output = run("exiftool -b -x ThumbnailImage -x PreviewImage -j \"%s\" > \"%s\"" % (image_path, tmp_file_path), quiet=True)

            with open(tmp_file_path) as f:
                j = json.loads(f.read())

                if isinstance(j, list):
                    j = j[0] # single file

                    if "RawThermalImage" in j:
                        imageBytes = base64.b64decode(j["RawThermalImage"][len("base64:"):])

                        with MemoryFile(imageBytes) as memfile:
                            with memfile.open() as dataset:
                                img = dataset.read()
                                bands, h, w = img.shape

                                if bands != 1:
                                    raise Exception("Raw thermal image has more than one band? This is not supported")

                                # (1, 512, 640) --> (512, 640, 1)
                                img = img[0][:,:,None]

                        del j["RawThermalImage"]

                    return extract_temperature_params_from(j), img
                else:
                    raise Exception("Invalid JSON (not a list)")

        except Exception as e:
            log.ODM_WARNING("Cannot extract tags using exiftool: %s" % str(e))
            return {}, None
        finally:
            if os.path.isfile(tmp_file_path):
                os.remove(tmp_file_path)
    except Exception as e:
        log.ODM_WARNING("Cannot create temporary file: %s" % str(e))
        return {}, None

def unit(unit):
    def _convert(v):
        if isinstance(v, float):
            return v
        elif isinstance(v, str):
            if not v[-1].isnumeric():
                if v[-1].upper() != unit.upper():
                    log.ODM_WARNING("Assuming %s is in %s" % (v, unit))
                return float(v[:-1])
            else:
                return float(v)
        else:
            return float(v)
    return _convert

def extract_temperature_params_from(tags):
    # Defaults
    meta = {
        "Emissivity": float,
        "ObjectDistance": unit("m"),
        "AtmosphericTemperature": unit("C"),
        "ReflectedApparentTemperature": unit("C"),
        "IRWindowTemperature": unit("C"),
        "IRWindowTransmission": float,
        "RelativeHumidity": unit("%"),
        "PlanckR1": float,
        "PlanckB": float,
        "PlanckF": float,
        "PlanckO": float,
        "PlanckR2": float,
    }

    params = {}

    for m in meta:
        if m not in tags:
            # All or nothing
            raise Exception("Cannot find %s in tags" % m)
        params[m] = (meta[m])(tags[m])

    return params
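The unit() helper builds small converters that tolerate either numeric values or strings carrying a trailing unit letter. A hedged illustration (the tag values are invented):

    to_celsius = unit("C")
    to_celsius(23.5)       # -> 23.5, floats pass through unchanged
    to_celsius("23.5 C")   # -> 23.5, the trailing unit letter is stripped
    to_celsius("23.5 F")   # -> 23.5, but logs "Assuming 23.5 F is in C"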
@@ -12,6 +12,9 @@ class GeoFile:

        with open(self.geo_path, 'r') as f:
            contents = f.read().strip()

        # Strip eventual BOM characters
        contents = contents.replace('\ufeff', '')

        lines = list(map(str.strip, contents.split('\n')))
        if lines:
@@ -279,9 +279,10 @@ def obj2glb(input_obj, output_glb, rtc=(None, None), draco_compression=True, _in
    )

    gltf.extensionsRequired = ['KHR_materials_unlit']
    gltf.extensionsUsed = ['KHR_materials_unlit']

    if rtc != (None, None) and len(rtc) >= 2:
        gltf.extensionsUsed = ['CESIUM_RTC', 'KHR_materials_unlit']
        gltf.extensionsUsed.append('CESIUM_RTC')
        gltf.extensions = {
            'CESIUM_RTC': {
                'center': [float(rtc[0]), float(rtc[1]), 0.0]
@@ -16,18 +16,15 @@ def has_popsift_and_can_handle_texsize(width, height):
        compute_major, compute_minor = get_cuda_compute_version(0)
        if compute_major < 3 or (compute_major == 3 and compute_minor < 5):
            # Not supported
            log.ODM_WARNING("CUDA compute platform is not supported (detected: %s.%s but we need at least 3.5)" % (compute_major, compute_minor))
            log.ODM_INFO("CUDA compute platform is not supported (detected: %s.%s but we need at least 3.5)" % (compute_major, compute_minor))
            return False
    except Exception as e:
        log.ODM_WARNING("Cannot use GPU for feature extraction: %s" % str(e))
        log.ODM_INFO("Using CPU for feature extraction: %s" % str(e))
        return False

    try:
        from opensfm import pypopsift
        fits = pypopsift.fits_texture(int(width * 1.02), int(height * 1.02))
        if not fits:
            log.ODM_WARNING("Image size (%sx%spx) would not fit in GPU memory, try lowering --feature-quality. Falling back to CPU" % (width, height))
        return fits
        return pypopsift.fits_texture(int(width * 1.02), int(height * 1.02))
    except (ModuleNotFoundError, ImportError):
        return False
    except Exception as e:

@@ -91,5 +88,4 @@ def has_gpu(args):
        log.ODM_INFO("nvidia-smi detected")
        return True
    else:
        log.ODM_INFO("nvidia-smi not found in PATH, using CPU")
    return False
@@ -151,6 +151,8 @@ def parse_srs_header(header):
                 ' - EPSG:*****\n'
                 ' - WGS84 UTM **(N|S)\n'
                 ' - Any valid proj4 string (for example, +proj=utm +zone=32 +north +ellps=WGS84 +datum=WGS84 +units=m +no_defs)\n\n'
                 ' Some valid EPSG codes are not yet available in OpenDroneMap and need substituted with valid proj4 strings\n'
                 ' Try searching for equivalent proj4 strings at spatialreference.org or epsg.io.\n'
                 'Modify your input and try again.' % header)
        raise RuntimeError(e)

@@ -165,4 +167,4 @@ def utm_transformers_from_ll(lon, lat):
    target_srs = utm_srs_from_ll(lon, lat)
    ll_to_utm = transformer(source_srs, target_srs)
    utm_to_ll = transformer(target_srs, source_srs)
    return ll_to_utm, utm_to_ll
@@ -7,11 +7,11 @@ import dateutil.parser
import shutil
import multiprocessing

from opendm.loghelpers import double_quote, args_to_dict
from opendm.arghelpers import double_quote, args_to_dict
from vmem import virtual_memory

if sys.platform == 'win32':
    # No colors on Windows, sorry!
if sys.platform == 'win32' or os.getenv('no_ansiesc'):
    # No colors on Windows (sorry !) or existing no_ansiesc env variable
    HEADER = ''
    OKBLUE = ''
    OKGREEN = ''
@ -1,28 +0,0 @@
|
|||
from shlex import _find_unsafe
|
||||
|
||||
def double_quote(s):
|
||||
"""Return a shell-escaped version of the string *s*."""
|
||||
if not s:
|
||||
return '""'
|
||||
if _find_unsafe(s) is None:
|
||||
return s
|
||||
|
||||
# use double quotes, and prefix double quotes with a \
|
||||
# the string $"b is then quoted as "$\"b"
|
||||
return '"' + s.replace('"', '\\\"') + '"'
|
||||
|
||||
def args_to_dict(args):
|
||||
args_dict = vars(args)
|
||||
result = {}
|
||||
for k in sorted(args_dict.keys()):
|
||||
# Skip _is_set keys
|
||||
if k.endswith("_is_set"):
|
||||
continue
|
||||
|
||||
# Don't leak token
|
||||
if k == 'sm_cluster' and args_dict[k] is not None:
|
||||
result[k] = True
|
||||
else:
|
||||
result[k] = args_dict[k]
|
||||
|
||||
return result
|
|
@@ -5,10 +5,11 @@ from opendm import system
from opendm import log
from opendm import context
from opendm import concurrency
from opendm import point_cloud
from scipy import signal
import numpy as np

def create_25dmesh(inPointCloud, outMesh, dsm_radius=0.07, dsm_resolution=0.05, depth=8, samples=1, maxVertexCount=100000, available_cores=None, method='gridded', smooth_dsm=True):
def create_25dmesh(inPointCloud, outMesh, radius_steps=["0.05"], dsm_resolution=0.05, depth=8, samples=1, maxVertexCount=100000, available_cores=None, method='gridded', smooth_dsm=True, max_tiles=None):
    # Create DSM from point cloud

    # Create temporary directory

@@ -19,22 +20,19 @@ def create_25dmesh(inPointCloud, outMesh, dsm_radius=0.07, dsm_resolution=0.05,
    os.mkdir(tmp_directory)
    log.ODM_INFO('Created temporary directory: %s' % tmp_directory)

    radius_steps = [dsm_radius]
    for _ in range(2):
        radius_steps.append(radius_steps[-1] * math.sqrt(2)) # sqrt(2) is arbitrary

    log.ODM_INFO('Creating DSM for 2.5D mesh')

    commands.create_dem(
        inPointCloud,
        'mesh_dsm',
        output_type='max',
        radiuses=list(map(str, radius_steps)),
        radiuses=radius_steps,
        gapfill=True,
        outdir=tmp_directory,
        resolution=dsm_resolution,
        max_workers=available_cores,
        apply_smoothing=smooth_dsm
        apply_smoothing=smooth_dsm,
        max_tiles=max_tiles
    )

    if method == 'gridded':

@@ -125,6 +123,7 @@ def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, maxConcurrency=1):

    system.run('"{reconstructmesh}" -i "{infile}" '
               '-o "{outfile}" '
               '--archive-type 3 '
               '--remove-spikes 0 --remove-spurious 0 --smooth 0 '
               '--target-face-num {max_faces} -v 0'.format(**cleanupArgs))

@@ -188,7 +187,7 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples =
            if threads < 1:
                break
            else:
                log.ODM_WARNING("PoissonRecon failed with %s threads, let's retry with %s..." % (threads, threads // 2))
                log.ODM_WARNING("PoissonRecon failed with %s threads, let's retry with %s..." % (threads * 2, threads))


    # Cleanup and reduce vertex count if necessary

@@ -201,6 +200,7 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples =

    system.run('"{reconstructmesh}" -i "{infile}" '
               '-o "{outfile}" '
               '--archive-type 3 '
               '--remove-spikes 0 --remove-spurious 20 --smooth 0 '
               '--target-face-num {max_faces} -v 0'.format(**cleanupArgs))
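The changed warning reflects that threads has already been halved by the time the message prints, so the old message reported the wrong numbers. A hedged sketch of the retry loop's shape (run_poisson is a hypothetical stand-in for the actual invocation):

    threads = 8
    while True:
        try:
            run_poisson(threads)   # hypothetical helper for the PoissonRecon call
            break
        except Exception:
            threads //= 2          # halve before checking and logging
            if threads < 1:
                break
            else:
                # threads was already halved above, hence (threads * 2, threads)
                print("PoissonRecon failed with %s threads, let's retry with %s..." % (threads * 2, threads))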
@@ -181,8 +181,13 @@ def get_primary_band_name(multi_camera, user_band_name):
    if len(multi_camera) < 1:
        raise Exception("Invalid multi_camera list")

    # multi_camera is already sorted by band_index
    # Pick RGB, or Green, or Blue, in this order, if available, otherwise first band
    if user_band_name == "auto":
        for aliases in [['rgb', 'redgreenblue'], ['green', 'g'], ['blue', 'b']]:
            for band in multi_camera:
                if band['name'].lower() in aliases:
                    return band['name']

        return multi_camera[0]['name']

    for band in multi_camera:

@@ -504,6 +509,28 @@ def find_features_homography(image_gray, align_image_gray, feature_retention=0.7

    # Detect SIFT features and compute descriptors.
    detector = cv2.SIFT_create(edgeThreshold=10, contrastThreshold=0.1)

    h, w = image_gray.shape
    max_dim = max(h, w)

    max_size = 2048
    if max_dim > max_size:
        if max_dim == w:
            f = max_size / w
        else:
            f = max_size / h
        image_gray = cv2.resize(image_gray, None, fx=f, fy=f, interpolation=cv2.INTER_AREA)
        h, w = image_gray.shape

    if align_image_gray.shape[0] != image_gray.shape[0]:
        fx = image_gray.shape[1] / align_image_gray.shape[1]
        fy = image_gray.shape[0] / align_image_gray.shape[0]

        align_image_gray = cv2.resize(align_image_gray, None,
                                      fx=fx,
                                      fy=fy,
                                      interpolation=(cv2.INTER_AREA if (fx < 1.0 and fy < 1.0) else cv2.INTER_LANCZOS4))

    kp_image, desc_image = detector.detectAndCompute(image_gray, None)
    kp_align_image, desc_align_image = detector.detectAndCompute(align_image_gray, None)
@@ -64,7 +64,7 @@ def build_textured_model(input_obj, output_path, reference_lla = None, model_bou
            'lon': lon,
            'alt': alt,
        }
        system.run('Obj2Tiles "{input}" "{output}" --divisions {divisions} '.format(**kwargs))
        system.run('Obj2Tiles "{input}" "{output}" --divisions {divisions} --lat {lat} --lon {lon} --alt {alt} '.format(**kwargs))

    except Exception as e:
        log.ODM_WARNING("Cannot build 3D tiles textured model: %s" % str(e))
@@ -0,0 +1,31 @@
import os
from opendm.ai import get_model
from opendm import log
from opendm.system import run
from opendm import io

def classify(point_cloud, max_threads=8):
    tmp_output = io.related_file_path(point_cloud, postfix=".classified")
    if os.path.isfile(tmp_output):
        os.remove(tmp_output)

    try:
        model = get_model("openpointclass",
                          "https://github.com/uav4geo/OpenPointClass/releases/download/v1.1.3/vehicles-vegetation-buildings.zip",
                          "v1.0.0",
                          name="model.bin")

        if model is not None:
            run('pcclassify "%s" "%s" "%s" -u -s 2,64' % (point_cloud, tmp_output, model), env_vars={'OMP_NUM_THREADS': max_threads})

            if os.path.isfile(tmp_output):
                os.remove(point_cloud)
                os.rename(tmp_output, point_cloud)
            else:
                log.ODM_WARNING("Cannot classify using OpenPointClass (no output generated)")
        else:
            log.ODM_WARNING("Cannot download/access model from %s" % (model_url))

    except Exception as e:
        log.ODM_WARNING("Cannot classify using OpenPointClass: %s" % str(e))
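A hedged usage sketch of the new OpenPointClass hook (the path is illustrative; on success the call replaces the input file in place with the classified copy):

    # Hypothetical invocation: downloads the pretrained model on first use,
    # classifies the cloud, and overwrites the original file.
    classify("/path/to/point_cloud.las", max_threads=4)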
@@ -85,7 +85,7 @@ def generate_kmz(orthophoto_file, output_file=None, outsize=None):
    system.run('gdal_translate -of KMLSUPEROVERLAY -co FORMAT=PNG "%s" "%s" %s '
               '--config GDAL_CACHEMAX %s%% ' % (orthophoto_file, output_file, bandparam, get_max_memory()))

def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_tiles_dir):
def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_tiles_dir, resolution):
    if args.crop > 0 or args.boundary:
        Cropper.crop(bounds_file_path, orthophoto_file, get_orthophoto_vars(args), keep_original=not args.optimize_disk_space, warp_options=['-dstalpha'])

@@ -99,7 +99,7 @@ def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_ti
        generate_kmz(orthophoto_file)

    if args.tiles:
        generate_orthophoto_tiles(orthophoto_file, orthophoto_tiles_dir, args.max_concurrency)
        generate_orthophoto_tiles(orthophoto_file, orthophoto_tiles_dir, args.max_concurrency, resolution)

    if args.cog:
        convert_to_cogeo(orthophoto_file, max_workers=args.max_concurrency, compression=args.orthophoto_compression)
@@ -13,7 +13,7 @@ from opendm import system
 from opendm import context
 from opendm import camera
 from opendm import location
-from opendm.photo import find_largest_photo_dim, find_largest_photo
+from opendm.photo import find_largest_photo_dims, find_largest_photo
 from opensfm.large import metadataset
 from opensfm.large import tools
 from opensfm.actions import undistort

@@ -49,11 +49,12 @@ class OSFMContext:
         else:
             log.ODM_WARNING('Found a valid OpenSfM tracks file in: %s' % tracks_file)

-    def reconstruct(self, rolling_shutter_correct=False, rerun=False):
+    def reconstruct(self, rolling_shutter_correct=False, merge_partial=False, rerun=False):
         reconstruction_file = os.path.join(self.opensfm_project_path, 'reconstruction.json')
         if not io.file_exists(reconstruction_file) or rerun:
             self.run('reconstruct')
-            self.check_merge_partial_reconstructions()
+            if merge_partial:
+                self.check_merge_partial_reconstructions()
         else:
             log.ODM_WARNING('Found a valid OpenSfM reconstruction file in: %s' % reconstruction_file)

@@ -63,7 +64,6 @@ class OSFMContext:
                 "Check that the images have enough overlap, "
                 "that there are enough recognizable features "
                 "and that the images are in focus. "
-                "You could also try to increase the --min-num-features parameter."
                 "The program will now exit.")

         if rolling_shutter_correct:

@@ -76,7 +76,7 @@ class OSFMContext:

                 self.match_features(True)
                 self.create_tracks(True)
-                self.reconstruct(rolling_shutter_correct=False, rerun=True)
+                self.reconstruct(rolling_shutter_correct=False, merge_partial=merge_partial, rerun=True)

                 self.touch(rs_file)
             else:

@@ -210,11 +210,25 @@ class OSFMContext:
             'lowest': 0.0675,
         }

-        max_dim = find_largest_photo_dim(photos)
+        max_dims = find_largest_photo_dims(photos)

-        if max_dim > 0:
+        if max_dims is not None:
+            w, h = max_dims
+            max_dim = max(w, h)
             log.ODM_INFO("Maximum photo dimensions: %spx" % str(max_dim))
-            feature_process_size = int(max_dim * feature_quality_scale[args.feature_quality])
+
+            lower_limit = 320
+            upper_limit = 4480
+            megapixels = (w * h) / 1e6
+            multiplier = 1
+
+            if megapixels < 2:
+                multiplier = 2
+            elif megapixels > 42:
+                multiplier = 0.5
+
+            factor = min(1, feature_quality_scale[args.feature_quality] * multiplier)
+            feature_process_size = min(upper_limit, max(lower_limit, int(max_dim * factor)))
+            log.ODM_INFO("Photo dimensions for feature extraction: %ipx" % feature_process_size)
         else:
             log.ODM_WARNING("Cannot compute max image dimensions, going with defaults")

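A quick worked check of the new sizing logic (values assumed for illustration; this also assumes the 'high' preset maps to a 0.5 scale):

# Hypothetical 8192x5460 (~44.7 MP) photo at --feature-quality high (scale 0.5)
w, h = 8192, 5460
megapixels = (w * h) / 1e6            # 44.73 -> above 42, so multiplier = 0.5
factor = min(1, 0.5 * 0.5)            # 0.25
size = min(4480, max(320, int(max(w, h) * factor)))
print(size)                           # 2048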
@@ -226,6 +240,11 @@ class OSFMContext:
         else:
             matcher_graph_rounds = 50
             matcher_neighbors = 0

+        # Always use matcher-neighbors if less than 4 pictures
+        if len(photos) <= 3:
+            matcher_graph_rounds = 0
+            matcher_neighbors = 3
+
         config = [
             "use_exif_size: no",

@@ -245,6 +264,12 @@ class OSFMContext:
             "triangulation_type: ROBUST",
             "retriangulation_ratio: 2",
         ]

+        if args.matcher_order > 0:
+            if not reconstruction.is_georeferenced():
+                config.append("matching_order_neighbors: %s" % args.matcher_order)
+            else:
+                log.ODM_WARNING("Georeferenced reconstruction, ignoring --matcher-order")
+
         if args.camera_lens != 'auto':
             config.append("camera_projection_type: %s" % args.camera_lens.upper())

@@ -271,9 +296,8 @@ class OSFMContext:
             config.append("matcher_type: %s" % osfm_matchers[matcher_type])

         # GPU acceleration?
-        if has_gpu(args):
-            max_photo = find_largest_photo(photos)
-            w, h = max_photo.width, max_photo.height
+        if has_gpu(args) and max_dims is not None:
+            w, h = max_dims
             if w > h:
                 h = int((h / w) * feature_process_size)
                 w = int(feature_process_size)

@@ -547,6 +571,8 @@ class OSFMContext:
             pdf_report.save_report("report.pdf")

             if os.path.exists(osfm_report_path):
+                if os.path.exists(report_path):
+                    os.unlink(report_path)
                 shutil.move(osfm_report_path, report_path)
             else:
                 log.ODM_WARNING("Report could not be generated")

@@ -767,3 +793,12 @@ def get_all_submodel_paths(submodels_path, *all_paths):
         result.append([os.path.join(submodels_path, f, ap) for ap in all_paths])

     return result
+
+def is_submodel(opensfm_root):
+    # A bit hackish, but works without introducing additional markers / flags
+    # Look at the path of the opensfm directory and see if "submodel_" is part of it
+    parts = os.path.abspath(opensfm_root).split(os.path.sep)
+
+    return (len(parts) >= 2 and parts[-2][:9] == "submodel_") or \
+           os.path.isfile(os.path.join(opensfm_root, "split_merge_stop_at_reconstruction.txt")) or \
+           os.path.isfile(os.path.join(opensfm_root, "features", "empty"))

@@ -19,7 +19,7 @@ from xml.parsers.expat import ExpatError
 from opensfm.sensors import sensor_data
 from opensfm.geo import ecef_from_lla

-projections = ['perspective', 'fisheye', 'brown', 'dual', 'equirectangular', 'spherical']
+projections = ['perspective', 'fisheye', 'fisheye_opencv', 'brown', 'dual', 'equirectangular', 'spherical']

 def find_largest_photo_dims(photos):
     max_mp = 0

@@ -305,7 +305,7 @@ class ODM_Photo:

         for xtags in xmp:
             try:
-                band_name = self.get_xmp_tag(xtags, ['Camera:BandName', '@Camera:BandName'])
+                band_name = self.get_xmp_tag(xtags, ['Camera:BandName', '@Camera:BandName', 'FLIR:BandName'])
                 if band_name is not None:
                     self.band_name = band_name.replace(" ", "")

@@ -350,6 +350,7 @@ class ODM_Photo:
                 '@drone-dji:CaptureUUID', # DJI
                 'MicaSense:CaptureId', # MicaSense Altum
                 '@Camera:ImageUniqueID', # sentera 6x
+                '@Camera:CaptureUUID', # Parrot Sequoia
             ])

             self.set_attr_from_xmp_tag('gain', xtags, [

@@ -427,6 +428,12 @@ class ODM_Photo:
             camera_projection = self.get_xmp_tag(xtags, ['@Camera:ModelType', 'Camera:ModelType'])
             if camera_projection is not None:
                 camera_projection = camera_projection.lower()
+
+                # Parrot Sequoia's "fisheye" model maps to "fisheye_opencv"
+                # or better yet, replace all fisheye with fisheye_opencv, but wait to change API signature
+                if camera_projection == "fisheye":
+                    camera_projection = "fisheye_opencv"
+
                 if camera_projection in projections:
                     self.camera_projection = camera_projection

@@ -611,9 +618,11 @@ class ODM_Photo:
                 else:
                     result.append(None)
             return result
-        else:
+        elif hasattr(tag.values, 'den'):
             return [float(tag.values.num) / float(tag.values.den) if tag.values.den != 0 else None]
+        else:
+            return [None]

     def float_value(self, tag):
         v = self.float_values(tag)
         if len(v) > 0:

@@ -622,6 +631,8 @@ class ODM_Photo:
     def int_values(self, tag):
         if isinstance(tag.values, list):
             return [int(v) for v in tag.values]
+        elif isinstance(tag.values, str) and tag.values == '':
+            return []
         else:
             return [int(tag.values)]

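Together these guards make EXIF value coercion total: rationals with a zero denominator and tags without a .den attribute both degrade to None instead of raising. A small self-contained sketch (Ratio and Tag are hypothetical stand-ins for exifread's value types):

class Ratio:
    # Hypothetical stand-in for an EXIF rational (exifread exposes .num/.den)
    def __init__(self, num, den):
        self.num, self.den = num, den

class Tag:
    def __init__(self, values):
        self.values = values

def float_values(tag):
    # Mirrors the guarded branches above (list handling elided)
    if hasattr(tag.values, 'den'):
        return [float(tag.values.num) / float(tag.values.den) if tag.values.den != 0 else None]
    else:
        return [None]

print(float_values(Tag(Ratio(24, 1))))   # [24.0]
print(float_values(Tag(Ratio(1, 0))))    # [None] -- zero denominator no longer raises
print(float_values(Tag("not-a-ratio")))  # [None] -- strings lack .den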
@@ -915,3 +926,6 @@ class ODM_Photo:
             return self.width * self.height / 1e6
         else:
             return 0.0
+
+    def is_make_model(self, make, model):
+        return self.camera_make.lower() == make.lower() and self.camera_model.lower() == model.lower()

@@ -9,6 +9,8 @@ from opendm.concurrency import parallel_map
 from opendm.utils import double_quote
 from opendm.boundary import as_polygon, as_geojson
 from opendm.dem.pdal import run_pipeline
+from opendm.opc import classify
+from opendm.dem import commands

 def ply_info(input_ply):
     if not os.path.exists(input_ply):

@@ -71,7 +73,7 @@ def split(input_point_cloud, outdir, filename_template, capacity, dims=None):
     return [os.path.join(outdir, f) for f in os.listdir(outdir)]


-def filter(input_point_cloud, output_point_cloud, standard_deviation=2.5, meank=16, sample_radius=0, boundary=None, max_concurrency=1):
+def filter(input_point_cloud, output_point_cloud, output_stats, standard_deviation=2.5, sample_radius=0, boundary=None, max_concurrency=1):
     """
     Filters a point cloud
     """

@@ -89,10 +91,11 @@ def filter(input_point_cloud, output_point_cloud, standard_deviation=2.5, meank=
         log.ODM_INFO("Sampling points around a %sm radius" % sample_radius)
         args.append('--radius %s' % sample_radius)

-    if standard_deviation > 0 and meank > 0:
-        log.ODM_INFO("Filtering {} (statistical, meanK {}, standard deviation {})".format(input_point_cloud, meank, standard_deviation))
-        args.append('--meank %s' % meank)
-        args.append('--std %s' % standard_deviation)
+    meank = 16
+    log.ODM_INFO("Filtering {} (statistical, meanK {}, standard deviation {})".format(input_point_cloud, meank, standard_deviation))
+    args.append('--meank %s' % meank)
+    args.append('--std %s' % standard_deviation)
+    args.append('--stats "%s"' % output_stats)

     if boundary is not None:
         log.ODM_INFO("Boundary {}".format(boundary))

@@ -107,6 +110,26 @@ def filter(input_point_cloud, output_point_cloud, standard_deviation=2.5, meank=
     if not os.path.exists(output_point_cloud):
         log.ODM_WARNING("{} not found, filtering has failed.".format(output_point_cloud))

+def get_spacing(stats_file, resolution_fallback=5.0):
+    def fallback():
+        log.ODM_WARNING("Cannot read %s, falling back to resolution estimate" % stats_file)
+        return (resolution_fallback / 100.0) / 2.0
+
+    if not os.path.isfile(stats_file):
+        return fallback()
+
+    with open(stats_file, 'r') as f:
+        j = json.loads(f.read())
+        if "spacing" in j:
+            d = j["spacing"]
+            if d > 0:
+                return round(d, 3)
+            else:
+                return fallback()
+        else:
+            return fallback()
+
 def export_info_json(pointcloud_path, info_file_path):
     system.run('pdal info --dimensions "X,Y,Z" "{0}" > "{1}"'.format(pointcloud_path, info_file_path))

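A worked check of the fallback arithmetic: with the default resolution_fallback of 5.0, a missing or spacing-less stats file yields (5.0 / 100.0) / 2.0 = 0.025, while a stats file containing {"spacing": 0.0412} yields 0.041:

# Values chosen for illustration
print((5.0 / 100.0) / 2.0)  # 0.025 -- fallback path
print(round(0.0412, 3))     # 0.041 -- value read from the stats JSON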
@@ -253,6 +276,32 @@ def merge_ply(input_point_cloud_files, output_file, dims=None):
     system.run(' '.join(cmd))

 def post_point_cloud_steps(args, tree, rerun=False):
+    # Classify and rectify before generating derivate files
+    if args.pc_classify:
+        pc_classify_marker = os.path.join(tree.odm_georeferencing, 'pc_classify_done.txt')
+
+        if not io.file_exists(pc_classify_marker) or rerun:
+            log.ODM_INFO("Classifying {} using Simple Morphological Filter (1/2)".format(tree.odm_georeferencing_model_laz))
+            commands.classify(tree.odm_georeferencing_model_laz,
+                              args.smrf_scalar,
+                              args.smrf_slope,
+                              args.smrf_threshold,
+                              args.smrf_window
+                              )
+
+            log.ODM_INFO("Classifying {} using OpenPointClass (2/2)".format(tree.odm_georeferencing_model_laz))
+            classify(tree.odm_georeferencing_model_laz, args.max_concurrency)
+
+            with open(pc_classify_marker, 'w') as f:
+                f.write('Classify: smrf\n')
+                f.write('Scalar: {}\n'.format(args.smrf_scalar))
+                f.write('Slope: {}\n'.format(args.smrf_slope))
+                f.write('Threshold: {}\n'.format(args.smrf_threshold))
+                f.write('Window: {}\n'.format(args.smrf_window))
+
+    if args.pc_rectify:
+        commands.rectify(tree.odm_georeferencing_model_laz)
+
     # XYZ point cloud output
     if args.pc_csv:
         log.ODM_INFO("Creating CSV file (XYZ format)")

@@ -290,4 +339,4 @@ def post_point_cloud_steps(args, tree, rerun=False):
         log.ODM_INFO("Creating Cloud Optimized Point Cloud (COPC)")

         copc_output = io.related_file_path(tree.odm_georeferencing_model_laz, postfix=".copc")
-        entwine.build_copc([tree.odm_georeferencing_model_laz], copc_output)
+        entwine.build_copc([tree.odm_georeferencing_model_laz], copc_output, convert_rgb_8_to_16=True)

@@ -448,7 +448,7 @@ class ReconstructionTask(Task):
                 log.ODM_INFO("==================================")
                 octx.feature_matching(self.params['rerun'])
                 octx.create_tracks(self.params['rerun'])
-                octx.reconstruct(self.params['rolling_shutter'], self.params['rerun'])
+                octx.reconstruct(self.params['rolling_shutter'], True, self.params['rerun'])

     def process_remote(self, done):
         octx = OSFMContext(self.path("opensfm"))

@@ -2,6 +2,7 @@ from opendm import log

 # Make Model (lowercase) --> readout time (ms)
 RS_DATABASE = {
+    'autel robotics xt701': 25, # Autel Evo II 8k
     'dji phantom vision fc200': 74, # Phantom 2

     'dji fc300s': 33, # Phantom 3 Advanced

@@ -11,18 +12,22 @@ RS_DATABASE = {
     'dji fc330': 33, # Phantom 4
     'dji fc6310': 33, # Phantom 4 Professional

-    'dji fc7203': 20, # Mavic Mini v1
+    'dji fc7203': lambda p: 19 if p.get_capture_megapixels() < 10 else 25, # DJI Mavic Mini v1 (at 16:9 => 9MP 19ms, at 4:3 => 12MP 25ms)
+    'dji fc2103': 32, # DJI Mavic Air 1
+    'dji fc3170': 27, # DJI Mavic Air 2
+    'dji fc3411': 32, # DJI Mavic Air 2S

     'dji fc220': 64, # DJI Mavic Pro (Platinum)
     'hasselblad l1d-20c': lambda p: 47 if p.get_capture_megapixels() < 17 else 56, # DJI Mavic 2 Pro (at 16:10 => 16.8MP 47ms, at 3:2 => 19.9MP 56ms. 4:3 has 17.7MP with same image height as 3:2 which can be concluded as same sensor readout)
     'hasselblad l2d-20c': 16.6, # DJI Mavic 3 (not enterprise version)

+    'dji fc3582': lambda p: 26 if p.get_capture_megapixels() < 48 else 60, # DJI Mini 3 pro (at 48MP readout is 60ms, at 12MP it's 26ms)
+
     'dji fc350': 30, # Inspire 1

     'dji mavic2-enterprise-advanced': 31, # DJI Mavic 2 Enterprise Advanced
     'dji zenmuse z30': 8, # DJI Zenmuse Z30

     'yuneec e90': 44, # Yuneec E90

     'gopro hero4 black': 30, # GoPro Hero 4 Black

@@ -34,6 +39,11 @@ RS_DATABASE = {

     'fujifilm x-t2': 35, # FUJIFILM X-T2 Mirrorless Interchangeable Lens Camera

+    'autel robotics xl724': 29, # Autel Nano+
+
     'parrot anafi': 39, # Parrot Anafi

+    'autel robotics xt705': 30, # Autel EVO II pro
+
     # Help us add more!
     # See: https://github.com/OpenDroneMap/RSCalibration for instructions

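Database entries are now either a fixed readout time in milliseconds or a callable keyed on the capture's megapixels. A sketch of how a lookup might be resolved (FakePhoto and readout_ms are illustrative assumptions, not ODM's actual resolution code):

RS_DATABASE = {
    'dji fc7203': lambda p: 19 if p.get_capture_megapixels() < 10 else 25,
    'dji fc2103': 32,
}

class FakePhoto:
    # Hypothetical stand-in exposing get_capture_megapixels()
    def __init__(self, mp):
        self.mp = mp
    def get_capture_megapixels(self):
        return self.mp

def readout_ms(make_model, photo):
    entry = RS_DATABASE[make_model]
    # An entry is either a number (ms) or a callable taking the photo
    return entry(photo) if callable(entry) else entry

print(readout_ms('dji fc7203', FakePhoto(12.0)))  # 25 -- 12 MP 4:3 mode
print(readout_ms('dji fc2103', FakePhoto(12.0)))  # 32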
@@ -66,11 +66,12 @@ def sighandler(signum, frame):
 signal.signal(signal.SIGINT, sighandler)
 signal.signal(signal.SIGTERM, sighandler)

-def run(cmd, env_paths=[context.superbuild_bin_path], env_vars={}, packages_paths=context.python_packages_paths):
+def run(cmd, env_paths=[context.superbuild_bin_path], env_vars={}, packages_paths=context.python_packages_paths, quiet=False):
     """Run a system command"""
     global running_subprocesses

-    log.ODM_INFO('running %s' % cmd)
+    if not quiet:
+        log.ODM_INFO('running %s' % cmd)
     env = os.environ.copy()

     sep = ":"

@@ -101,7 +102,8 @@ def run(cmd, env_paths=[context.superbuild_bin_path], env_vars={}, packages_path

     retcode = p.wait()

-    log.logger.log_json_process(cmd, retcode, list(lines))
+    if not quiet:
+        log.logger.log_json_process(cmd, retcode, list(lines))

     running_subprocesses.remove(p)
     if retcode < 0:

@@ -1,7 +1,9 @@
-from opendm import log
-from opendm.thermal_tools import dji_unpack
 import cv2
+import os
+from opendm import log
+from opendm.thermal_tools import dji_unpack
+from opendm.exiftool import extract_raw_thermal_image_data
+from opendm.thermal_tools.thermal_utils import sensor_vals_to_temp

 def resize_to_match(image, match_photo = None):
     """

@@ -19,28 +21,26 @@ def resize_to_match(image, match_photo = None):
                            interpolation=cv2.INTER_LANCZOS4)
     return image

-def dn_to_temperature(photo, image, dataset_tree):
+def dn_to_temperature(photo, image, images_path):
     """
     Convert Digital Number values to temperature (C) values
     :param photo ODM_Photo
     :param image numpy array containing image data
-    :param dataset_tree path to original source image to read data using PIL for DJI thermal photos
+    :param images_path path to original source image to read data using PIL for DJI thermal photos
     :return numpy array with temperature (C) image values
     """

     # Handle thermal bands
     if photo.is_thermal():
         # Every camera stores thermal information differently
         # The following will work for MicaSense Altum cameras
         # but not necessarily for others
-        if photo.camera_make == "MicaSense" and photo.camera_model == "Altum":
+        if photo.camera_make == "MicaSense" and photo.camera_model[:5] == "Altum":
             image = image.astype("float32")
             image -= (273.15 * 100.0) # Convert Kelvin to Celsius
             image *= 0.01
             return image
         elif photo.camera_make == "DJI" and photo.camera_model == "ZH20T":
             filename, file_extension = os.path.splitext(photo.filename)
             # DJI H20T high gain mode supports measurement of -40~150 celsius degrees
             if file_extension.lower() in [".tif", ".tiff"] and image.min() >= 23315: # Calibrated grayscale tif

@@ -51,11 +51,18 @@ def dn_to_temperature(photo, image, dataset_tree):
             else:
                 return image
         elif photo.camera_make == "DJI" and photo.camera_model == "MAVIC2-ENTERPRISE-ADVANCED":
-            image = dji_unpack.extract_temperatures_dji(photo, image, dataset_tree)
+            image = dji_unpack.extract_temperatures_dji(photo, image, images_path)
             image = image.astype("float32")
             return image
         else:
-            log.ODM_WARNING("Unsupported camera [%s %s], thermal band will have digital numbers." % (photo.camera_make, photo.camera_model))
+            try:
+                params, image = extract_raw_thermal_image_data(os.path.join(images_path, photo.filename))
+                image = sensor_vals_to_temp(image, **params)
+            except Exception as e:
+                log.ODM_WARNING("Cannot radiometrically calibrate %s: %s" % (photo.filename, str(e)))
+
+            image = image.astype("float32")
+            return image
     else:
         image = image.astype("float32")
         log.ODM_WARNING("Tried to radiometrically calibrate a non-thermal image with temperature values (%s)" % photo.filename)

@@ -1,271 +0,0 @@
-"""
-THIS IS WIP, DON'T USE THIS FILE, IT IS HERE FOR FURTHER IMPROVEMENT
-Tools for extracting thermal data from FLIR images.
-Derived from https://bitbucket.org/nimmerwoner/flyr/src/master/
-"""
-
-import os
-from io import BufferedIOBase, BytesIO
-from typing import BinaryIO, Dict, Optional, Tuple, Union
-
-import numpy as np
-from PIL import Image
-
-# Constants
-SEGMENT_SEP = b"\xff"
-APP1_MARKER = b"\xe1"
-MAGIC_FLIR_DEF = b"FLIR\x00"
-
-CHUNK_APP1_BYTES_COUNT = len(APP1_MARKER)
-CHUNK_LENGTH_BYTES_COUNT = 2
-CHUNK_MAGIC_BYTES_COUNT = len(MAGIC_FLIR_DEF)
-CHUNK_SKIP_BYTES_COUNT = 1
-CHUNK_NUM_BYTES_COUNT = 1
-CHUNK_TOT_BYTES_COUNT = 1
-CHUNK_PARTIAL_METADATA_LENGTH = CHUNK_APP1_BYTES_COUNT + CHUNK_LENGTH_BYTES_COUNT + CHUNK_MAGIC_BYTES_COUNT
-CHUNK_METADATA_LENGTH = (
-    CHUNK_PARTIAL_METADATA_LENGTH + CHUNK_SKIP_BYTES_COUNT + CHUNK_NUM_BYTES_COUNT + CHUNK_TOT_BYTES_COUNT
-)
-
-
-def unpack(path_or_stream: Union[str, BinaryIO]) -> np.ndarray:
-    """Unpacks the FLIR image, meaning that it will return the thermal data embedded in the image.
-
-    Parameters
-    ----------
-    path_or_stream : Union[str, BinaryIO]
-        Either a path (string) to a FLIR file, or a byte stream such as
-        BytesIO or file opened as `open(file_path, "rb")`.
-
-    Returns
-    -------
-    FlyrThermogram
-        When successful, a FlyrThermogram object containing thermogram data.
-    """
-    if isinstance(path_or_stream, str) and os.path.isfile(path_or_stream):
-        with open(path_or_stream, "rb") as flirh:
-            return unpack(flirh)
-    elif isinstance(path_or_stream, BufferedIOBase):
-        stream = path_or_stream
-        flir_app1_stream = extract_flir_app1(stream)
-        flir_records = parse_flir_app1(flir_app1_stream)
-        raw_np = parse_thermal(flir_app1_stream, flir_records)
-
-        return raw_np
-    else:
-        raise ValueError("Incorrect input")
-
-
-def extract_flir_app1(stream: BinaryIO) -> BinaryIO:
-    """Extracts the FLIR APP1 bytes.
-
-    Parameters
-    ---------
-    stream : BinaryIO
-        A full bytes stream of a JPEG file, expected to be a FLIR file.
-
-    Raises
-    ------
-    ValueError
-        When the file is invalid in one of the following ways, a
-        ValueError is thrown.
-
-        * File is not a JPEG
-        * A FLIR chunk number occurs more than once
-        * The total chunks count is inconsistent over multiple chunks
-        * No APP1 segments are successfully parsed
-
-    Returns
-    -------
-    BinaryIO
-        A bytes stream of the APP1 FLIR segments
-    """
-    # Check JPEG-ness
-    _ = stream.read(2)
-
-    chunks_count: Optional[int] = None
-    chunks: Dict[int, bytes] = {}
-    while True:
-        b = stream.read(1)
-        if b == b"":
-            break
-
-        if b != SEGMENT_SEP:
-            continue
-
-        parsed_chunk = parse_flir_chunk(stream, chunks_count)
-        if not parsed_chunk:
-            continue
-
-        chunks_count, chunk_num, chunk = parsed_chunk
-        chunk_exists = chunks.get(chunk_num, None) is not None
-        if chunk_exists:
-            raise ValueError("Invalid FLIR: duplicate chunk number")
-        chunks[chunk_num] = chunk
-
-        # Encountered all chunks, break out of loop to process found metadata
-        if chunk_num == chunks_count:
-            break
-
-    if chunks_count is None:
-        raise ValueError("Invalid FLIR: no metadata encountered")
-
-    flir_app1_bytes = b""
-    for chunk_num in range(chunks_count + 1):
-        flir_app1_bytes += chunks[chunk_num]
-
-    flir_app1_stream = BytesIO(flir_app1_bytes)
-    flir_app1_stream.seek(0)
-    return flir_app1_stream
-
-
-def parse_flir_chunk(stream: BinaryIO, chunks_count: Optional[int]) -> Optional[Tuple[int, int, bytes]]:
-    """Parse flir chunk."""
-    # Parse the chunk header. Headers are as follows (definition with example):
-    #
-    #     \xff\xe1<length: 2 bytes>FLIR\x00\x01<chunk nr: 1 byte><chunk count: 1 byte>
-    #     \xff\xe1\xff\xfeFLIR\x00\x01\x01\x0b
-    #
-    # Meaning: Exif APP1, 65534 long, FLIR chunk 1 out of 12
-    marker = stream.read(CHUNK_APP1_BYTES_COUNT)
-
-    length_bytes = stream.read(CHUNK_LENGTH_BYTES_COUNT)
-    length = int.from_bytes(length_bytes, "big")
-    length -= CHUNK_METADATA_LENGTH
-    magic_flir = stream.read(CHUNK_MAGIC_BYTES_COUNT)
-
-    if not (marker == APP1_MARKER and magic_flir == MAGIC_FLIR_DEF):
-        # Seek back to just after byte b and continue searching for chunks
-        stream.seek(-len(marker) - len(length_bytes) - len(magic_flir), 1)
-        return None
-
-    stream.seek(1, 1)  # skip 1 byte, unsure what it is for
-
-    chunk_num = int.from_bytes(stream.read(CHUNK_NUM_BYTES_COUNT), "big")
-    chunks_tot = int.from_bytes(stream.read(CHUNK_TOT_BYTES_COUNT), "big")
-
-    # Remember total chunks to verify metadata consistency
-    if chunks_count is None:
-        chunks_count = chunks_tot
-
-    if (  # Check whether chunk metadata is consistent
-        chunks_tot is None or chunk_num < 0 or chunk_num > chunks_tot or chunks_tot != chunks_count
-    ):
-        raise ValueError(f"Invalid FLIR: inconsistent total chunks, should be 0 or greater, but is {chunks_tot}")
-
-    return chunks_tot, chunk_num, stream.read(length + 1)
-
-
-def parse_thermal(stream: BinaryIO, records: Dict[int, Tuple[int, int, int, int]]) -> np.ndarray:
-    """Parse thermal."""
-    RECORD_IDX_RAW_DATA = 1
-    raw_data_md = records[RECORD_IDX_RAW_DATA]
-    _, _, raw_data = parse_raw_data(stream, raw_data_md)
-    return raw_data
-
-
-def parse_flir_app1(stream: BinaryIO) -> Dict[int, Tuple[int, int, int, int]]:
-    """Parse flir app1."""
-    # 0x00 - string[4] file format ID = "FFF\0"
-    # 0x04 - string[16] file creator: seen "\0","MTX IR\0","CAMCTRL\0"
-    # 0x14 - int32u file format version = 100
-    # 0x18 - int32u offset to record directory
-    # 0x1c - int32u number of entries in record directory
-    # 0x20 - int32u next free index ID = 2
-    # 0x24 - int16u swap pattern = 0 (?)
-    # 0x28 - int16u[7] spares
-    # 0x34 - int32u[2] reserved
-    # 0x3c - int32u checksum
-
-    # 1. Read 0x40 bytes and verify that its contents equals AFF\0 or FFF\0
-    _ = stream.read(4)
-
-    # 2. Read FLIR record directory metadata (ref 3)
-    stream.seek(16, 1)
-    _ = int.from_bytes(stream.read(4), "big")
-    record_dir_offset = int.from_bytes(stream.read(4), "big")
-    record_dir_entries_count = int.from_bytes(stream.read(4), "big")
-    stream.seek(28, 1)
-    _ = int.from_bytes(stream.read(4), "big")
-
-    # 3. Read record directory (which is a FLIR record entry repeated
-    # `record_dir_entries_count` times)
-    stream.seek(record_dir_offset)
-    record_dir_stream = BytesIO(stream.read(32 * record_dir_entries_count))
-
-    # First parse the record metadata
-    record_details: Dict[int, Tuple[int, int, int, int]] = {}
-    for record_nr in range(record_dir_entries_count):
-        record_dir_stream.seek(0)
-        details = parse_flir_record_metadata(stream, record_nr)
-        if details:
-            record_details[details[1]] = details
-
-    # Then parse the actual records
-    # for (entry_idx, type, offset, length) in record_details:
-    #     parse_record = record_parsers[type]
-    #     stream.seek(offset)
-    #     record = BytesIO(stream.read(length + 36))  # + 36 needed to find end
-    #     parse_record(record, offset, length)
-
-    return record_details
-
-
-def parse_flir_record_metadata(stream: BinaryIO, record_nr: int) -> Optional[Tuple[int, int, int, int]]:
-    """Parse flir record metadata."""
-    # FLIR record entry (ref 3):
-    # 0x00 - int16u record type
-    # 0x02 - int16u record subtype: RawData 1=BE, 2=LE, 3=PNG; 1 for other record types
-    # 0x04 - int32u record version: seen 0x64,0x66,0x67,0x68,0x6f,0x104
-    # 0x08 - int32u index id = 1
-    # 0x0c - int32u record offset from start of FLIR data
-    # 0x10 - int32u record length
-    # 0x14 - int32u parent = 0 (?)
-    # 0x18 - int32u object number = 0 (?)
-    # 0x1c - int32u checksum: 0 for no checksum
-    entry = 32 * record_nr
-    stream.seek(entry)
-    record_type = int.from_bytes(stream.read(2), "big")
-    if record_type < 1:
-        return None
-
-    _ = int.from_bytes(stream.read(2), "big")
-    _ = int.from_bytes(stream.read(4), "big")
-    _ = int.from_bytes(stream.read(4), "big")
-    record_offset = int.from_bytes(stream.read(4), "big")
-    record_length = int.from_bytes(stream.read(4), "big")
-    _ = int.from_bytes(stream.read(4), "big")
-    _ = int.from_bytes(stream.read(4), "big")
-    _ = int.from_bytes(stream.read(4), "big")
-    return (entry, record_type, record_offset, record_length)
-
-
-def parse_raw_data(stream: BinaryIO, metadata: Tuple[int, int, int, int]):
-    """Parse raw data."""
-    (_, _, offset, length) = metadata
-    stream.seek(offset)
-
-    stream.seek(2, 1)
-    width = int.from_bytes(stream.read(2), "little")
-    height = int.from_bytes(stream.read(2), "little")
-
-    stream.seek(offset + 32)
-
-    # Read the bytes with the raw thermal data and decode using PIL
-    thermal_bytes = stream.read(length)
-    thermal_stream = BytesIO(thermal_bytes)
-    thermal_img = Image.open(thermal_stream)
-    thermal_np = np.array(thermal_img)
-
-    # Check shape
-    if thermal_np.shape != (height, width):
-        msg = "Invalid FLIR: metadata's width and height don't match thermal data's actual width and height ({} vs ({}, {})"
-        msg = msg.format(thermal_np.shape, height, width)
-        raise ValueError(msg)
-
-    # FLIR PNG data is in the wrong byte order, fix that
-    fix_byte_order = np.vectorize(lambda x: (x >> 8) + ((x & 0x00FF) << 8))
-    thermal_np = fix_byte_order(thermal_np)
-
-    return width, height, thermal_np

@@ -1,16 +1,25 @@
 import os
 import sys
+import math
 from opendm import log
 from opendm import system
 from opendm import io

-def generate_tiles(geotiff, output_dir, max_concurrency):
-    gdal2tiles = os.path.join(os.path.dirname(__file__), "gdal2tiles.py")
-    system.run('%s "%s" --processes %s -z 5-21 -n -w none "%s" "%s"' % (sys.executable, gdal2tiles, max_concurrency, geotiff, output_dir))
+def generate_tiles(geotiff, output_dir, max_concurrency, resolution):
+    circumference_earth_cm = 2*math.pi*637_813_700
+    px_per_tile = 256
+    resolution_equator_cm = circumference_earth_cm/px_per_tile
+    zoom = math.ceil(math.log(resolution_equator_cm/resolution, 2))

-def generate_orthophoto_tiles(geotiff, output_dir, max_concurrency):
+    min_zoom = 5  # 4.89 km/px
+    max_zoom = min(zoom, 23)  # No deeper zoom than 23 (1.86 cm/px at equator)
+
+    gdal2tiles = os.path.join(os.path.dirname(__file__), "gdal2tiles.py")
+    system.run('%s "%s" --processes %s -z %s-%s -n -w none "%s" "%s"' % (sys.executable, gdal2tiles, max_concurrency, min_zoom, max_zoom, geotiff, output_dir))
+
+def generate_orthophoto_tiles(geotiff, output_dir, max_concurrency, resolution):
     try:
-        generate_tiles(geotiff, output_dir, max_concurrency)
+        generate_tiles(geotiff, output_dir, max_concurrency, resolution)
     except Exception as e:
         log.ODM_WARNING("Cannot generate orthophoto tiles: %s" % str(e))

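The max zoom now derives from the orthophoto's resolution via the web-mercator ground resolution at the equator; a quick check of the formula (5 cm/px is an assumed example value):

import math

circumference_earth_cm = 2 * math.pi * 637_813_700    # Earth radius in cm
resolution_equator_cm = circumference_earth_cm / 256  # cm/px at zoom 0 with 256px tiles

# A 5 cm/px orthophoto: ceil(log2(15_654_303 / 5)) = 22, which stays under the cap of 23
print(math.ceil(math.log(resolution_equator_cm / 5.0, 2)))  # 22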
@@ -37,10 +46,10 @@ def generate_colored_hillshade(geotiff):
         log.ODM_WARNING("Cannot generate colored hillshade: %s" % str(e))
         return (None, None, None)

-def generate_dem_tiles(geotiff, output_dir, max_concurrency):
+def generate_dem_tiles(geotiff, output_dir, max_concurrency, resolution):
     try:
         colored_dem, hillshade_dem, colored_hillshade_dem = generate_colored_hillshade(geotiff)
-        generate_tiles(colored_hillshade_dem, output_dir, max_concurrency)
+        generate_tiles(colored_hillshade_dem, output_dir, max_concurrency, resolution)

         # Cleanup
         for f in [colored_dem, hillshade_dem, colored_hillshade_dem]:

@@ -13,6 +13,7 @@ from opendm import log
 from opendm import io
 from opendm import system
 from opendm import context
+from opendm import multispectral

 from opendm.progress import progressbc
 from opendm.photo import ODM_Photo

@@ -27,7 +28,7 @@ class ODM_Reconstruction(object):
         self.gcp = None
         self.multi_camera = self.detect_multi_camera()
         self.filter_photos()

     def detect_multi_camera(self):
         """
         Looks at the reconstruction photos and determines if this

@@ -45,22 +46,88 @@ class ODM_Reconstruction(object):
             band_photos[p.band_name].append(p)

         bands_count = len(band_photos)
-        if bands_count >= 2 and bands_count <= 8:
+
+        # Band name with the maximum number of photos
+        max_band_name = None
+        max_photos = -1
+        for band_name in band_photos:
+            if len(band_photos[band_name]) > max_photos:
+                max_band_name = band_name
+                max_photos = len(band_photos[band_name])
+
+        if bands_count >= 2 and bands_count <= 10:
-            # Validate that all bands have the same number of images,
-            # otherwise this is not a multi-camera setup
-            img_per_band = len(band_photos[p.band_name])
-            for band in band_photos:
-                if len(band_photos[band]) != img_per_band:
-                    log.ODM_ERROR("Multi-camera setup detected, but band \"%s\" (identified from \"%s\") has only %s images (instead of %s), perhaps images are missing or are corrupted. Please include all necessary files to process all bands and try again." % (band, band_photos[band][0].filename, len(band_photos[band]), img_per_band))
-                    raise RuntimeError("Invalid multi-camera images")
+            img_per_band = len(band_photos[max_band_name])

             mc = []
             for band_name in band_indexes:
                 mc.append({'name': band_name, 'photos': band_photos[band_name]})

-            # Sort by band index
-            mc.sort(key=lambda x: band_indexes[x['name']])
+            filter_missing = False
+            for band in band_photos:
+                if len(band_photos[band]) < img_per_band:
+                    log.ODM_WARNING("Multi-camera setup detected, but band \"%s\" (identified from \"%s\") has only %s images (instead of %s), perhaps images are missing or are corrupted." % (band, band_photos[band][0].filename, len(band_photos[band]), len(band_photos[max_band_name])))
+                    filter_missing = True
+
+            if filter_missing:
+                # Calculate files to ignore
+                _, p2s = multispectral.compute_band_maps(mc, max_band_name)
+
+                max_files_per_band = 0
+
+                for filename in p2s:
+                    max_files_per_band = max(max_files_per_band, len(p2s[filename]))
+
+                for filename in p2s:
+                    if len(p2s[filename]) < max_files_per_band:
+                        photos_to_remove = p2s[filename] + [p for p in self.photos if p.filename == filename]
+                        for photo in photos_to_remove:
+                            log.ODM_WARNING("Excluding %s" % photo.filename)
+
+                            self.photos = [p for p in self.photos if p != photo]
+                            for i in range(len(mc)):
+                                mc[i]['photos'] = [p for p in mc[i]['photos'] if p != photo]
+
+                log.ODM_INFO("New image count: %s" % len(self.photos))
+
+            # We enforce a normalized band order for all bands that we can identify
+            # and rely on the manufacturer's band_indexes as a fallback for all others
+            normalized_band_order = {
+                'RGB': '0',
+                'REDGREENBLUE': '0',
+
+                'RED': '1',
+                'R': '1',
+
+                'GREEN': '2',
+                'G': '2',
+
+                'BLUE': '3',
+                'B': '3',
+
+                'NIR': '4',
+                'N': '4',
+
+                'REDEDGE': '5',
+                'RE': '5',
+
+                'PANCHRO': '6',
+
+                'LWIR': '7',
+                'L': '7',
+            }
+
+            for band_name in band_indexes:
+                if band_name.upper() not in normalized_band_order:
+                    log.ODM_WARNING(f"Cannot identify order for {band_name} band, using manufacturer suggested index instead")
+
+            # Sort
+            mc.sort(key=lambda x: normalized_band_order.get(x['name'].upper(), '9' + band_indexes[x['name']]))
+
+            for c, d in enumerate(mc):
+                log.ODM_INFO(f"Band {c + 1}: {d['name']}")

             return mc

         return None

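Because the fallback key prefixes '9' to the manufacturer index, every identified band sorts ahead of unidentified ones. A small sketch of the ordering (band names and indexes assumed for illustration):

normalized_band_order = {'RGB': '0', 'RED': '1', 'GREEN': '2', 'BLUE': '3', 'NIR': '4'}
band_indexes = {'NIR': '2', 'Red': '1', 'Thermal': '5'}  # manufacturer-declared order

mc = [{'name': n} for n in ('Thermal', 'NIR', 'Red')]
mc.sort(key=lambda x: normalized_band_order.get(x['name'].upper(), '9' + band_indexes[x['name']]))
print([b['name'] for b in mc])  # ['Red', 'NIR', 'Thermal'] -- unknown 'Thermal' sorts last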
@@ -82,6 +149,12 @@ class ODM_Reconstruction(object):
             if 'rgb' in bands or 'redgreenblue' in bands:
                 if 'red' in bands and 'green' in bands and 'blue' in bands:
                     bands_to_remove.append(bands['rgb'] if 'rgb' in bands else bands['redgreenblue'])
+
+                # The Mavic 3M's RGB camera lens is too different from the multispectral ones
+                # so we drop the RGB channel instead
+                elif self.photos[0].is_make_model("DJI", "M3M") and 'red' in bands and 'green' in bands:
+                    bands_to_remove.append(bands['rgb'] if 'rgb' in bands else bands['redgreenblue'])
+
                 else:
                     for b in ['red', 'green', 'blue']:
                         if b in bands:

@@ -290,6 +363,7 @@ class ODM_Tree(object):

         # filter points
         self.filtered_point_cloud = os.path.join(self.odm_filterpoints, "point_cloud.ply")
+        self.filtered_point_cloud_stats = os.path.join(self.odm_filterpoints, "point_cloud_stats.json")

         # odm_meshing
         self.odm_mesh = os.path.join(self.odm_meshing, 'odm_mesh.ply')

@@ -4,7 +4,7 @@ import json
 from opendm import log
 from opendm.photo import find_largest_photo_dims
 from osgeo import gdal
-from opendm.loghelpers import double_quote
+from opendm.arghelpers import double_quote

 class NumpyEncoder(json.JSONEncoder):
     def default(self, obj):

@@ -54,8 +54,10 @@ class SrtFileParser:
         if not self.gps_data:
             for d in self.data:
                 lat, lon, alt = d.get('latitude'), d.get('longitude'), d.get('altitude')
+                if alt is None:
+                    alt = 0
                 tm = d.get('start')

                 if lat is not None and lon is not None:
                     if self.ll_to_utm is None:
                         self.ll_to_utm, self.utm_to_ll = location.utm_transformers_from_ll(lon, lat)

@@ -122,6 +124,25 @@ class SrtFileParser:
         # 00:00:00,000 --> 00:00:01,000
         # F/2.8, SS 206.14, ISO 150, EV 0, GPS (-82.6669, 27.7716, 10), D 2.80m, H 0.00m, H.S 0.00m/s, V.S 0.00m/s

+        # DJI Phantom4 RTK
+        # 36
+        # 00:00:35,000 --> 00:00:36,000
+        # F/6.3, SS 60, ISO 100, EV 0, RTK (120.083799, 30.213635, 28), HOME (120.084146, 30.214243, 103.55m), D 75.36m, H 76.19m, H.S 0.30m/s, V.S 0.00m/s, F.PRY (-5.3°, 2.1°, 28.3°), G.PRY (-40.0°, 0.0°, 28.2°)
+
+        # DJI Unknown Model #1
+        # 1
+        # 00:00:00,000 --> 00:00:00,033
+        # <font size="28">SrtCnt : 1, DiffTime : 33ms
+        # 2024-01-18 10:23:26.397
+        # [iso : 150] [shutter : 1/5000.0] [fnum : 170] [ev : 0] [ct : 5023] [color_md : default] [focal_len : 240] [dzoom_ratio: 10000, delta:0],[latitude: -22.724555] [longitude: -47.602414] [rel_alt: 0.300 abs_alt: 549.679] </font>
+
+        # DJI Mavic 2 Zoom
+        # 1
+        # 00:00:00,000 --> 00:00:00,041
+        # <font size="36">FrameCnt : 1, DiffTime : 41ms
+        # 2023-07-15 11:55:16,320,933
+        # [iso : 100] [shutter : 1/400.0] [fnum : 280] [ev : 0] [ct : 5818] [color_md : default] [focal_len : 240] [latitude : 0.000000] [longtitude : 0.000000] [altitude: 0.000000] </font>
+
         with open(self.filename, 'r') as f:

             iso = None

@@ -192,15 +213,21 @@ class SrtFileParser:

                 latitude = match_single([
                     ("latitude: ([\d\.\-]+)", lambda v: float(v) if v != 0 else None),
+                    ("latitude : ([\d\.\-]+)", lambda v: float(v) if v != 0 else None),
                     ("GPS \([\d\.\-]+,? ([\d\.\-]+),? [\d\.\-]+\)", lambda v: float(v) if v != 0 else None),
+                    ("RTK \([-+]?\d+\.\d+, (-?\d+\.\d+), -?\d+\)", lambda v: float(v) if v != 0 else None),
                 ], line)

                 longitude = match_single([
                     ("longitude: ([\d\.\-]+)", lambda v: float(v) if v != 0 else None),
+                    ("longtitude : ([\d\.\-]+)", lambda v: float(v) if v != 0 else None),
                     ("GPS \(([\d\.\-]+),? [\d\.\-]+,? [\d\.\-]+\)", lambda v: float(v) if v != 0 else None),
+                    ("RTK \((-?\d+\.\d+), [-+]?\d+\.\d+, -?\d+\)", lambda v: float(v) if v != 0 else None),
                 ], line)

                 altitude = match_single([
                     ("altitude: ([\d\.\-]+)", lambda v: float(v) if v != 0 else None),
                     ("GPS \([\d\.\-]+,? [\d\.\-]+,? ([\d\.\-]+)\)", lambda v: float(v) if v != 0 else None),
+                    ("RTK \([-+]?\d+\.\d+, [-+]?\d+\.\d+, (-?\d+)\)", lambda v: float(v) if v != 0 else None),
+                    ("abs_alt: ([\d\.\-]+)", lambda v: float(v) if v != 0 else None),
                 ], line)

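A sketch of how one of the new RTK patterns picks the latitude out of a Phantom4 RTK line (match_single is reimplemented here on the assumption that it returns the first matching pattern's converted capture; the sample line comes from the comment block above):

import re

def match_single(patterns, line):
    # Assumed behaviour: first matching pattern wins, its capture is converted
    for pattern, convert in patterns:
        m = re.search(pattern, line)
        if m:
            return convert(m.group(1))
    return None

line = "F/6.3, SS 60, ISO 100, EV 0, RTK (120.083799, 30.213635, 28), D 75.36m"
lat = match_single([
    ("RTK \([-+]?\d+\.\d+, (-?\d+\.\d+), -?\d+\)", lambda v: float(v) if v != 0 else None),
], line)
print(lat)  # 30.213635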
@@ -4,10 +4,10 @@ beautifulsoup4==4.9.3
 cloudpickle==1.6.0
 edt==2.0.2
 ODMExifRead==3.0.4
-Fiona==1.8.17 ; sys_platform == 'linux' or sys_platform == 'darwin'
+Fiona==1.8.17 ; sys_platform == 'linux'
+Fiona==1.9.1 ; sys_platform == 'darwin'
 https://github.com/OpenDroneMap/windows-deps/raw/main/Fiona-1.8.19-cp38-cp38-win_amd64.whl ; sys_platform == 'win32'
 joblib==1.1.0
 laspy[lazrs]==2.3.0
 lxml==4.6.1
 matplotlib==3.3.3
 networkx==2.5

@@ -19,7 +19,8 @@ pyproj==3.3.1
 Pysolar==0.9
 pytz==2020.4
 PyYAML==5.1
-rasterio==1.2.3 ; sys_platform == 'linux' or sys_platform == 'darwin'
+rasterio==1.2.3 ; sys_platform == 'linux'
+rasterio==1.3.6 ; sys_platform == 'darwin'
 https://github.com/OpenDroneMap/windows-deps/raw/main/rasterio-1.2.3-cp38-cp38-win_amd64.whl ; sys_platform == 'win32'
 https://github.com/OpenDroneMap/windows-deps/raw/main/GDAL-3.2.3-cp38-cp38-win_amd64.whl ; sys_platform == 'win32'
 repoze.lru==0.7

run.py

@@ -13,7 +13,7 @@ from opendm import system
 from opendm import io
 from opendm.progress import progressbc
 from opendm.utils import get_processing_results_paths, rm_r
-from opendm.loghelpers import args_to_dict
+from opendm.arghelpers import args_to_dict, save_opts, compare_args, find_rerun_stage

 from stages.odm_app import ODMApp

@@ -29,20 +29,26 @@ if __name__ == '__main__':

     log.ODM_INFO('Initializing ODM %s - %s' % (odm_version(), system.now()))

-    progressbc.set_project_name(args.name)
-    args.project_path = os.path.join(args.project_path, args.name)
-
-    if not io.dir_exists(args.project_path):
-        log.ODM_ERROR('Directory %s does not exist.' % args.name)
-        exit(1)
+    opts_json = os.path.join(args.project_path, "options.json")
+    auto_rerun_stage, opts_diff = find_rerun_stage(opts_json, args, config.rerun_stages, config.processopts)
+    if auto_rerun_stage is not None and len(auto_rerun_stage) > 0:
+        log.ODM_INFO("Rerunning from: %s" % auto_rerun_stage[0])
+        args.rerun_from = auto_rerun_stage

     # Print args
     args_dict = args_to_dict(args)
     log.ODM_INFO('==============')
     for k in args_dict.keys():
-        log.ODM_INFO('%s: %s' % (k, args_dict[k]))
+        log.ODM_INFO('%s: %s%s' % (k, args_dict[k], ' [changed]' if k in opts_diff else ''))
     log.ODM_INFO('==============')

+    progressbc.set_project_name(args.name)
+
+    # Add project dir if doesn't exist
+    args.project_path = os.path.join(args.project_path, args.name)
+    if not io.dir_exists(args.project_path):
+        log.ODM_WARNING('Directory %s does not exist. Creating it now.' % args.name)
+        system.mkdir_p(os.path.abspath(args.project_path))
+
     # If user asks to rerun everything, delete all of the existing progress directories.
     if args.rerun_all:

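find_rerun_stage presumably diffs the saved options.json against the current arguments and maps each changed option to the earliest pipeline stage that consumes it; a rough sketch of that idea (the function body, option-to-stage mapping, and file layout are assumptions, not ODM's actual implementation):

import json, os

def find_rerun_stage_sketch(opts_json, current_opts, opt_to_stage, stage_order):
    if not os.path.isfile(opts_json):
        return None, {}  # nothing saved yet, run normally

    with open(opts_json) as f:
        saved = json.load(f)

    diff = {k: v for k, v in current_opts.items() if saved.get(k) != v}
    affected = [opt_to_stage[k] for k in diff if k in opt_to_stage]
    if not affected:
        return None, diff

    # Rerun from the earliest stage any changed option feeds into
    return [min(affected, key=stage_order.index)], diff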
@@ -57,6 +63,9 @@ if __name__ == '__main__':

     app = ODMApp(args)
     retcode = app.execute()

+    if retcode == 0:
+        save_opts(opts_json, args)
+
     # Do not show ASCII art for local submodels runs
     if retcode == 0 and not "submodels" in args.project_path:

@@ -135,7 +135,7 @@ class ODMLoadDatasetStage(types.ODM_Stage):
                 "input": video_files,
                 "output": images_dir,

-                "blur_threshold": 300,
+                "blur_threshold": 200,
                 "distance_threshold": 10,
                 "black_ratio_threshold": 0.98,
                 "pixel_black_threshold": 0.30,

@@ -330,3 +330,8 @@ class ODMLoadDatasetStage(types.ODM_Stage):
                     log.ODM_WARNING("No omega/phi/kappa angles found in input photos (%s), switching sfm-algorithm to incremental" % p.filename)
                     args.sfm_algorithm = 'incremental'
                     break
+
+        # Rolling shutter cannot be done in non-georeferenced datasets
+        if args.rolling_shutter and not reconstruction.is_georeferenced():
+            log.ODM_WARNING("Reconstruction is not georeferenced, disabling rolling shutter correction")
+            args.rolling_shutter = False

@@ -81,14 +81,11 @@ class ODMMvsTexStage(types.ODM_Stage):

             # Format arguments to fit Mvs-Texturing app
             skipGlobalSeamLeveling = ""
-            skipLocalSeamLeveling = ""
             keepUnseenFaces = ""
             nadir = ""

             if args.texturing_skip_global_seam_leveling:
                 skipGlobalSeamLeveling = "--skip_global_seam_leveling"
-            if args.texturing_skip_local_seam_leveling:
-                skipLocalSeamLeveling = "--skip_local_seam_leveling"
             if args.texturing_keep_unseen_faces:
                 keepUnseenFaces = "--keep_unseen_faces"
             if (r['nadir']):

@@ -102,7 +99,6 @@ class ODMMvsTexStage(types.ODM_Stage):
                 'dataTerm': 'gmi',
                 'outlierRemovalType': 'gauss_clamping',
                 'skipGlobalSeamLeveling': skipGlobalSeamLeveling,
-                'skipLocalSeamLeveling': skipLocalSeamLeveling,
                 'keepUnseenFaces': keepUnseenFaces,
                 'toneMapping': 'none',
                 'nadirMode': nadir,

@@ -114,7 +110,7 @@ class ODMMvsTexStage(types.ODM_Stage):

             mvs_tmp_dir = os.path.join(r['out_dir'], 'tmp')

-            # Make sure tmp directory is empty
+            # mvstex creates a tmp directory, so make sure it is empty
             if io.dir_exists(mvs_tmp_dir):
                 log.ODM_INFO("Removing old tmp directory {}".format(mvs_tmp_dir))
                 shutil.rmtree(mvs_tmp_dir)

@@ -125,7 +121,6 @@ class ODMMvsTexStage(types.ODM_Stage):
                      '-t {toneMapping} '
                      '{intermediate} '
                      '{skipGlobalSeamLeveling} '
-                     '{skipLocalSeamLeveling} '
                      '{keepUnseenFaces} '
                      '{nadirMode} '
                      '{labelingFile} '

@@ -27,6 +27,7 @@ class ODMApp:
         Initializes the application and defines the ODM application pipeline stages
         """
         json_log_paths = [os.path.join(args.project_path, "log.json")]

         if args.copy_to:
             json_log_paths.append(args.copy_to)

@@ -13,7 +13,6 @@ from opendm import pseudogeo
 from opendm.tiles.tiler import generate_dem_tiles
 from opendm.cogeo import convert_to_cogeo

 class ODMDEMStage(types.ODM_Stage):
     def process(self, args, outputs):
         tree = outputs['tree']

@@ -29,17 +28,12 @@ class ODMDEMStage(types.ODM_Stage):
             ignore_resolution = True
             pseudo_georeference = True

-        # It is probably not reasonable to have accurate DEMs a the same resolution as the source photos, so reduce it
-        # by a factor!
-        gsd_scaling = 2.0
-
         resolution = gsd.cap_resolution(args.dem_resolution, tree.opensfm_reconstruction,
-                                        gsd_scaling=gsd_scaling,
+                                        gsd_scaling=1.0,
                                         ignore_gsd=args.ignore_gsd,
                                         ignore_resolution=ignore_resolution and args.ignore_gsd,
                                         has_gcp=reconstruction.has_gcp())

         log.ODM_INFO('Classify: ' + str(args.pc_classify))
         log.ODM_INFO('Create DSM: ' + str(args.dsm))
         log.ODM_INFO('Create DTM: ' + str(args.dtm))
         log.ODM_INFO('DEM input file {0} found: {1}'.format(dem_input, str(pc_model_found)))

@@ -49,31 +43,9 @@ class ODMDEMStage(types.ODM_Stage):
         if not io.dir_exists(odm_dem_root):
             system.mkdir_p(odm_dem_root)

-        if args.pc_classify and pc_model_found:
-            pc_classify_marker = os.path.join(odm_dem_root, 'pc_classify_done.txt')
-
-            if not io.file_exists(pc_classify_marker) or self.rerun():
-                log.ODM_INFO("Classifying {} using Simple Morphological Filter".format(dem_input))
-                commands.classify(dem_input,
-                                  args.smrf_scalar,
-                                  args.smrf_slope,
-                                  args.smrf_threshold,
-                                  args.smrf_window
-                                  )
-
-                with open(pc_classify_marker, 'w') as f:
-                    f.write('Classify: smrf\n')
-                    f.write('Scalar: {}\n'.format(args.smrf_scalar))
-                    f.write('Slope: {}\n'.format(args.smrf_slope))
-                    f.write('Threshold: {}\n'.format(args.smrf_threshold))
-                    f.write('Window: {}\n'.format(args.smrf_window))
-
         progress = 20
         self.update_progress(progress)

-        if args.pc_rectify:
-            commands.rectify(dem_input, False)
-
         # Do we need to process anything here?
         if (args.dsm or args.dtm) and pc_model_found:
             dsm_output_filename = os.path.join(odm_dem_root, 'dsm.tif')

@@ -88,9 +60,7 @@ class ODMDEMStage(types.ODM_Stage):
             if args.dsm or (args.dtm and args.dem_euclidean_map): products.append('dsm')
             if args.dtm: products.append('dtm')

-            radius_steps = [(resolution / 100.0) / 2.0]
-            for _ in range(args.dem_gapfill_steps - 1):
-                radius_steps.append(radius_steps[-1] * math.sqrt(2))  # sqrt(2) is arbitrary, maybe there's a better value?
+            radius_steps = commands.get_dem_radius_steps(tree.filtered_point_cloud_stats, args.dem_gapfill_steps, resolution)

             for product in products:
                 commands.create_dem(

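The removed inline logic seeded the gap-fill radius at half the output resolution and grew it by sqrt(2) per step; presumably get_dem_radius_steps keeps a similar progression but can seed from the measured point spacing in the stats file. The old arithmetic for reference (resolution value assumed):

import math

resolution = 5.0        # cm/px, assumed example
dem_gapfill_steps = 3

radius_steps = [(resolution / 100.0) / 2.0]  # 0.025 m
for _ in range(dem_gapfill_steps - 1):
    radius_steps.append(radius_steps[-1] * math.sqrt(2))

print([round(r, 4) for r in radius_steps])   # [0.025, 0.0354, 0.05]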
@@ -103,7 +73,8 @@ class ODMDEMStage(types.ODM_Stage):
                     resolution=resolution / 100.0,
                     decimation=args.dem_decimation,
                     max_workers=args.max_concurrency,
-                    keep_unfilled_copy=args.dem_euclidean_map
+                    with_euclidean_map=args.dem_euclidean_map,
+                    max_tiles=None if reconstruction.has_geotagged_photos() else math.ceil(len(reconstruction.photos) / 2)
                 )

                 dem_geotiff_path = os.path.join(odm_dem_root, "{}.tif".format(product))

@@ -113,27 +84,16 @@ class ODMDEMStage(types.ODM_Stage):
                     # Crop DEM
                     Cropper.crop(bounds_file_path, dem_geotiff_path, utils.get_dem_vars(args), keep_original=not args.optimize_disk_space)

-                if args.dem_euclidean_map:
-                    unfilled_dem_path = io.related_file_path(dem_geotiff_path, postfix=".unfilled")
-
-                    if args.crop > 0 or args.boundary:
-                        # Crop unfilled DEM
-                        Cropper.crop(bounds_file_path, unfilled_dem_path, utils.get_dem_vars(args), keep_original=not args.optimize_disk_space)
-
-                    commands.compute_euclidean_map(unfilled_dem_path,
-                                        io.related_file_path(dem_geotiff_path, postfix=".euclideand"),
-                                        overwrite=True)
-
                 if pseudo_georeference:
                     pseudogeo.add_pseudo_georeferencing(dem_geotiff_path)

                 if args.tiles:
-                    generate_dem_tiles(dem_geotiff_path, tree.path("%s_tiles" % product), args.max_concurrency)
+                    generate_dem_tiles(dem_geotiff_path, tree.path("%s_tiles" % product), args.max_concurrency, resolution)

                 if args.cog:
                     convert_to_cogeo(dem_geotiff_path, max_workers=args.max_concurrency)

-                progress += 30
+                progress += 40
                 self.update_progress(progress)
             else:
                 log.ODM_WARNING('Found existing outputs in: %s' % odm_dem_root)

@@ -36,7 +36,7 @@ class ODMFilterPoints(types.ODM_Stage):
                 else:
                     avg_gsd = gsd.opensfm_reconstruction_average_gsd(tree.opensfm_reconstruction)
                     if avg_gsd is not None:
-                        boundary_distance = avg_gsd * 20 # 20 is arbitrary
+                        boundary_distance = avg_gsd * 100 # 100 is arbitrary

                 if boundary_distance is not None:
                     outputs['boundary'] = compute_boundary_from_shots(tree.opensfm_reconstruction, boundary_distance, reconstruction.get_proj_offset())

@@ -49,7 +49,7 @@ class ODMFilterPoints(types.ODM_Stage):
                 else:
                     log.ODM_WARNING("Not a georeferenced reconstruction, will ignore --auto-boundary")

-            point_cloud.filter(inputPointCloud, tree.filtered_point_cloud,
+            point_cloud.filter(inputPointCloud, tree.filtered_point_cloud, tree.filtered_point_cloud_stats,
                       standard_deviation=args.pc_filter,
                       sample_radius=args.pc_sample,
                       boundary=boundary_offset(outputs.get('boundary'), reconstruction.get_proj_offset()),

@@ -5,6 +5,7 @@ import pipes
 import fiona
 import fiona.crs
 import json
+import zipfile
 from collections import OrderedDict
 from pyproj import CRS

@@ -32,6 +33,7 @@ class ODMGeoreferencingStage(types.ODM_Stage):
         gcp_export_file = tree.path("odm_georeferencing", "ground_control_points.gpkg")
         gcp_gml_export_file = tree.path("odm_georeferencing", "ground_control_points.gml")
         gcp_geojson_export_file = tree.path("odm_georeferencing", "ground_control_points.geojson")
+        gcp_geojson_zip_export_file = tree.path("odm_georeferencing", "ground_control_points.zip")
         unaligned_model = io.related_file_path(tree.odm_georeferencing_model_laz, postfix="_unaligned")
         if os.path.isfile(unaligned_model) and self.rerun():
             os.unlink(unaligned_model)

@@ -54,7 +56,7 @@ class ODMGeoreferencingStage(types.ODM_Stage):
             }

             # Write GeoPackage
             with fiona.open(gcp_export_file, 'w', driver="GPKG",
                             crs=fiona.crs.from_string(reconstruction.georef.proj4()),
                             schema=gcp_schema) as f:
                 for gcp in gcps:

@@ -72,13 +74,13 @@ class ODMGeoreferencingStage(types.ODM_Stage):
                         ('error_z', gcp['error'][2]),
-                    ])
+                    })

             # Write GML
             try:
                 system.run('ogr2ogr -of GML "{}" "{}"'.format(gcp_gml_export_file, gcp_export_file))
             except Exception as e:
                 log.ODM_WARNING("Cannot generate ground control points GML file: %s" % str(e))

             # Write GeoJSON
             geojson = {
                 'type': 'FeatureCollection',

@@ -101,42 +103,48 @@ class ODMGeoreferencingStage(types.ODM_Stage):
                         },
                         'properties': properties
                     })
 
                 with open(gcp_geojson_export_file, 'w') as f:
                     f.write(json.dumps(geojson, indent=4))
 
+                with zipfile.ZipFile(gcp_geojson_zip_export_file, 'w', compression=zipfile.ZIP_LZMA) as f:
+                    f.write(gcp_geojson_export_file, arcname=os.path.basename(gcp_geojson_export_file))
 
             else:
                 log.ODM_WARNING("GCPs could not be loaded for writing to %s" % gcp_export_file)
 
         if not io.file_exists(tree.odm_georeferencing_model_laz) or self.rerun():
-            cmd = ('pdal translate -i "%s" -o \"%s\"' % (tree.filtered_point_cloud, tree.odm_georeferencing_model_laz))
+            cmd = f'pdal translate -i "{tree.filtered_point_cloud}" -o \"{tree.odm_georeferencing_model_laz}\"'
             stages = ["ferry"]
             params = [
-                '--filters.ferry.dimensions="views => UserData"',
-                '--writers.las.compression="lazip"',
+                '--filters.ferry.dimensions="views => UserData"'
             ]
 
             if reconstruction.is_georeferenced():
                 log.ODM_INFO("Georeferencing point cloud")
 
                 stages.append("transformation")
+                utmoffset = reconstruction.georef.utm_offset()
                 params += [
-                    '--filters.transformation.matrix="1 0 0 %s 0 1 0 %s 0 0 1 0 0 0 0 1"' % reconstruction.georef.utm_offset(),
-                    '--writers.las.offset_x=%s' % reconstruction.georef.utm_east_offset,
-                    '--writers.las.offset_y=%s' % reconstruction.georef.utm_north_offset,
+                    f'--filters.transformation.matrix="1 0 0 {utmoffset[0]} 0 1 0 {utmoffset[1]} 0 0 1 0 0 0 0 1"',
+                    f'--writers.las.offset_x={reconstruction.georef.utm_east_offset}',
+                    f'--writers.las.offset_y={reconstruction.georef.utm_north_offset}',
                     '--writers.las.scale_x=0.001',
                     '--writers.las.scale_y=0.001',
                     '--writers.las.scale_z=0.001',
                     '--writers.las.offset_z=0',
-                    '--writers.las.a_srs="%s"' % reconstruction.georef.proj4()
+                    f'--writers.las.a_srs="{reconstruction.georef.proj4()}"' # HOBU this should maybe be WKT
                 ]
 
-                if reconstruction.has_gcp() and io.file_exists(gcp_gml_export_file):
-                    log.ODM_INFO("Embedding GCP info in point cloud")
-                    params += [
-                        '--writers.las.vlrs="{\\\"filename\\\": \\\"%s\\\", \\\"user_id\\\": \\\"ODM_GCP\\\", \\\"description\\\": \\\"Ground Control Points (GML)\\\"}"' % gcp_gml_export_file.replace(os.sep, "/")
-                    ]
+                if reconstruction.has_gcp() and io.file_exists(gcp_geojson_zip_export_file):
+                    if os.path.getsize(gcp_geojson_zip_export_file) <= 65535:
+                        log.ODM_INFO("Embedding GCP info in point cloud")
+                        params += [
+                            '--writers.las.vlrs="{\\\"filename\\\": \\\"%s\\\", \\\"user_id\\\": \\\"ODM\\\", \\\"record_id\\\": 2, \\\"description\\\": \\\"Ground Control Points (zip)\\\"}"' % gcp_geojson_zip_export_file.replace(os.sep, "/")
+                        ]
+                    else:
+                        log.ODM_WARNING("Cannot embed GCP info in point cloud, %s is too large" % gcp_geojson_zip_export_file)
 
             system.run(cmd + ' ' + ' '.join(stages) + ' ' + ' '.join(params))
 
         self.update_progress(50)
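A minimal sketch (not part of the diff) of what the filters.transformation matrix assembled above does: the row-major 4x4 matrix is a pure translation that shifts each locally-referenced point back by the UTM offset. The offsets below are hypothetical.

import numpy as np

east_offset, north_offset = 322263.0, 5157982.0  # hypothetical UTM offsets

# Row-major layout matches the string "1 0 0 E 0 1 0 N 0 0 1 0 0 0 0 1"
M = np.array([
    [1.0, 0.0, 0.0, east_offset],    # x' = x + E
    [0.0, 1.0, 0.0, north_offset],   # y' = y + N
    [0.0, 0.0, 1.0, 0.0],            # z' = z
    [0.0, 0.0, 0.0, 1.0],
])

p = np.array([10.5, -3.2, 41.0, 1.0])  # homogeneous point in local coordinates
print(M @ p)  # -> [ 322273.5  5157978.8  41.   1. ]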
@@ -144,27 +152,27 @@ class ODMGeoreferencingStage(types.ODM_Stage):
         if args.crop > 0:
             log.ODM_INFO("Calculating cropping area and generating bounds shapefile from point cloud")
             cropper = Cropper(tree.odm_georeferencing, 'odm_georeferenced_model')
 
             if args.fast_orthophoto:
                 decimation_step = 4
             else:
                 decimation_step = 40
 
             # More aggressive decimation for large datasets
             if not args.fast_orthophoto:
                 decimation_step *= int(len(reconstruction.photos) / 1000) + 1
                 decimation_step = min(decimation_step, 95)
 
             try:
                 cropper.create_bounds_gpkg(tree.odm_georeferencing_model_laz, args.crop,
                                            decimation_step=decimation_step)
             except:
                 log.ODM_WARNING("Cannot calculate crop bounds! We will skip cropping")
                 args.crop = 0
 
         if 'boundary' in outputs and args.crop == 0:
             log.ODM_INFO("Using boundary JSON as cropping area")
 
             bounds_base, _ = os.path.splitext(tree.odm_georeferencing_model_laz)
             bounds_json = bounds_base + ".bounds.geojson"
             bounds_gpkg = bounds_base + ".bounds.gpkg"
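A quick worked example (not from the source) of the decimation arithmetic above, assuming a hypothetical 3,500-photo dataset on the non-fast-orthophoto path:

photos = 3500                               # hypothetical dataset size
decimation_step = 40                        # non-fast-orthophoto starting point
decimation_step *= int(photos / 1000) + 1   # 40 * (3 + 1) = 160
decimation_step = min(decimation_step, 95)  # clamped to the 95-step ceiling
print(decimation_step)                      # -> 95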
@@ -207,8 +215,7 @@ class ODMGeoreferencingStage(types.ODM_Stage):
                     os.rename(unaligned_model, tree.odm_georeferencing_model_laz)
 
                 # Align textured models
-                for texturing in [tree.odm_texturing, tree.odm_25dtexturing]:
-                    obj = os.path.join(texturing, "odm_textured_model_geo.obj")
+                def transform_textured_model(obj):
                     if os.path.isfile(obj):
                         unaligned_obj = io.related_file_path(obj, postfix="_unaligned")
                         if os.path.isfile(unaligned_obj):
@@ -220,7 +227,18 @@ class ODMGeoreferencingStage(types.ODM_Stage):
                         except Exception as e:
                             log.ODM_WARNING("Cannot transform textured model: %s" % str(e))
                             os.rename(unaligned_obj, obj)
 
+                for texturing in [tree.odm_texturing, tree.odm_25dtexturing]:
+                    if reconstruction.multi_camera:
+                        primary = get_primary_band_name(reconstruction.multi_camera, args.primary_band)
+                        for band in reconstruction.multi_camera:
+                            subdir = "" if band['name'] == primary else band['name'].lower()
+                            obj = os.path.join(texturing, subdir, "odm_textured_model_geo.obj")
+                            transform_textured_model(obj)
+                    else:
+                        obj = os.path.join(texturing, "odm_textured_model_geo.obj")
+                        transform_textured_model(obj)
 
                 with open(tree.odm_georeferencing_alignment_matrix, "w") as f:
                     f.write(np_to_json(a_matrix))
             else:
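A small sketch (band names hypothetical) of how the loop above resolves the per-band texturing directory; the primary band stays at the top level while secondary bands get lower-cased subdirectories:

import os

multi_camera = [{'name': 'RGB'}, {'name': 'NIR'}, {'name': 'Red edge'}]  # hypothetical bands
primary = 'RGB'  # stand-in for what get_primary_band_name() would return here

for band in multi_camera:
    subdir = "" if band['name'] == primary else band['name'].lower()
    print(os.path.join("odm_texturing", subdir, "odm_textured_model_geo.obj"))
# On POSIX:
# -> odm_texturing/odm_textured_model_geo.obj
# -> odm_texturing/nir/odm_textured_model_geo.obj
# -> odm_texturing/red edge/odm_textured_model_geo.obj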
@@ -234,8 +252,8 @@ class ODMGeoreferencingStage(types.ODM_Stage):
         else:
             log.ODM_WARNING('Found a valid georeferenced model in: %s'
                             % tree.odm_georeferencing_model_laz)
 
         if args.optimize_disk_space and io.file_exists(tree.odm_georeferencing_model_laz) and io.file_exists(tree.filtered_point_cloud):
             os.remove(tree.filtered_point_cloud)
@@ -7,6 +7,7 @@ from opendm import context
 from opendm import mesh
 from opendm import gsd
 from opendm import types
+from opendm.dem import commands
 
 class ODMeshingStage(types.ODM_Stage):
     def process(self, args, outputs):
@@ -40,35 +41,26 @@ class ODMeshingStage(types.ODM_Stage):
         if not io.file_exists(tree.odm_25dmesh) or self.rerun():
 
             log.ODM_INFO('Writing ODM 2.5D Mesh file in: %s' % tree.odm_25dmesh)
-            ortho_resolution = gsd.cap_resolution(args.orthophoto_resolution, tree.opensfm_reconstruction,
-                                                  ignore_gsd=args.ignore_gsd,
-                                                  ignore_resolution=(not reconstruction.is_georeferenced()) and args.ignore_gsd,
-                                                  has_gcp=reconstruction.has_gcp()) / 100.0
-
-            dsm_multiplier = max(1.0, gsd.rounded_gsd(tree.opensfm_reconstruction, default_value=4, ndigits=3, ignore_gsd=args.ignore_gsd))
-
-            # A good DSM size depends on the flight altitude.
-            # Flights at low altitude need more details (higher resolution)
-            # Flights at higher altitude benefit from smoother surfaces (lower resolution)
-            dsm_resolution = ortho_resolution * dsm_multiplier
-
-            dsm_radius = dsm_resolution * math.sqrt(2)
-
-            if args.fast_orthophoto:
-                dsm_radius *= 2
-                dsm_resolution *= 8
+            multiplier = math.pi / 2.0
+            radius_steps = commands.get_dem_radius_steps(tree.filtered_point_cloud_stats, 3, args.orthophoto_resolution, multiplier=multiplier)
+            dsm_resolution = radius_steps[0] / multiplier
 
             log.ODM_INFO('ODM 2.5D DSM resolution: %s' % dsm_resolution)
 
+            if args.fast_orthophoto:
+                dsm_resolution *= 8.0
 
             mesh.create_25dmesh(tree.filtered_point_cloud, tree.odm_25dmesh,
-                                dsm_radius=dsm_radius,
+                                radius_steps,
                                 dsm_resolution=dsm_resolution,
                                 depth=self.params.get('oct_tree'),
                                 maxVertexCount=self.params.get('max_vertex'),
                                 samples=self.params.get('samples'),
                                 available_cores=args.max_concurrency,
                                 method='poisson' if args.fast_orthophoto else 'gridded',
-                                smooth_dsm=True)
+                                smooth_dsm=True,
+                                max_tiles=None if reconstruction.has_geotagged_photos() else math.ceil(len(reconstruction.photos) / 2))
         else:
             log.ODM_WARNING('Found a valid ODM 2.5D Mesh file in: %s' %
                             tree.odm_25dmesh)
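A worked sketch (numbers hypothetical) of the new DSM resolution math above: the first gridding radius returned by get_dem_radius_steps() is divided back by the pi/2 multiplier, and fast-orthophoto runs then coarsen the result 8x:

import math

multiplier = math.pi / 2.0
radius_steps = [0.24, 0.48, 0.96]              # hypothetical 3-step output, in meters
dsm_resolution = radius_steps[0] / multiplier  # 0.24 / 1.5708 ~= 0.153 m/pixel
print(round(dsm_resolution, 3))                # -> 0.153

dsm_resolution *= 8.0                          # fast_orthophoto trades detail for speed
print(round(dsm_resolution, 3))                # -> 1.222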
@@ -7,7 +7,8 @@ from opendm import context
 from opendm import types
 from opendm import gsd
 from opendm import orthophoto
-from opendm.concurrency import get_max_memory
+from opendm.osfm import is_submodel
+from opendm.concurrency import get_max_memory_mb
 from opendm.cutline import compute_cutline
 from opendm.utils import double_quote
 from opendm import pseudogeo
@@ -28,10 +29,10 @@ class ODMOrthoPhotoStage(types.ODM_Stage):
 
         if not io.file_exists(tree.odm_orthophoto_tif) or self.rerun():
 
-            resolution = 1.0 / (gsd.cap_resolution(args.orthophoto_resolution, tree.opensfm_reconstruction,
-                                                   ignore_gsd=args.ignore_gsd,
-                                                   ignore_resolution=(not reconstruction.is_georeferenced()) and args.ignore_gsd,
-                                                   has_gcp=reconstruction.has_gcp()) / 100.0)
+            resolution = gsd.cap_resolution(args.orthophoto_resolution, tree.opensfm_reconstruction,
+                                            ignore_gsd=args.ignore_gsd,
+                                            ignore_resolution=(not reconstruction.is_georeferenced()) and args.ignore_gsd,
+                                            has_gcp=reconstruction.has_gcp())
 
             # odm_orthophoto definitions
             kwargs = {
@@ -39,9 +40,14 @@ class ODMOrthoPhotoStage(types.ODM_Stage):
                 'log': tree.odm_orthophoto_log,
                 'ortho': tree.odm_orthophoto_render,
                 'corners': tree.odm_orthophoto_corners,
-                'res': resolution,
+                'res': 1.0 / (resolution/100.0),
                 'bands': '',
-                'depth_idx': ''
+                'depth_idx': '',
+                'inpaint': '',
+                'utm_offsets': '',
+                'a_srs': '',
+                'vars': '',
+                'gdal_configs': '--config GDAL_CACHEMAX %s' % (get_max_memory_mb() * 1024 * 1024)
             }
 
             models = []
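An illustration (values hypothetical) of the two unit conversions in the hunk above: cap_resolution() yields a ground sampling distance in centimeters per pixel, while odm_orthophoto's -resolution flag expects pixels per meter, hence the inversion; GDAL_CACHEMAX is likewise converted from megabytes to bytes.

resolution_cm_px = 5.0                       # hypothetical GSD: 5 cm/pixel
res_px_m = 1.0 / (resolution_cm_px / 100.0)  # 0.05 m/pixel -> 20 px/m
print(res_px_m)                              # -> 20.0

max_memory_mb = 2048                         # what get_max_memory_mb() might return
print(max_memory_mb * 1024 * 1024)           # -> 2147483648 bytes for GDAL_CACHEMAX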
@@ -79,59 +85,37 @@ class ODMOrthoPhotoStage(types.ODM_Stage):
                 else:
                     models.append(os.path.join(base_dir, model_file))
 
+                # Perform edge inpainting on georeferenced RGB datasets
+                if reconstruction.is_georeferenced():
+                    kwargs['inpaint'] = "-inpaintThreshold 1.0"
+
                 # Thermal dataset with single band
                 if reconstruction.photos[0].band_name.upper() == "LWIR":
                     kwargs['bands'] = '-bands lwir'
 
             kwargs['models'] = ','.join(map(double_quote, models))
 
+            if reconstruction.is_georeferenced():
+                orthophoto_vars = orthophoto.get_orthophoto_vars(args)
+                kwargs['utm_offsets'] = "-utm_north_offset %s -utm_east_offset %s" % (reconstruction.georef.utm_north_offset, reconstruction.georef.utm_east_offset)
+                kwargs['a_srs'] = "-a_srs \"%s\"" % reconstruction.georef.proj4()
+                kwargs['vars'] = ' '.join(['-co %s=%s' % (k, orthophoto_vars[k]) for k in orthophoto_vars])
+                kwargs['ortho'] = tree.odm_orthophoto_tif # Render directly to final file
+
             # run odm_orthophoto
+            log.ODM_INFO('Creating GeoTIFF')
             system.run('"{odm_ortho_bin}" -inputFiles {models} '
                        '-logFile "{log}" -outputFile "{ortho}" -resolution {res} -verbose '
-                       '-outputCornerFile "{corners}" {bands} {depth_idx}'.format(**kwargs))
+                       '-outputCornerFile "{corners}" {bands} {depth_idx} {inpaint} '
+                       '{utm_offsets} {a_srs} {vars} {gdal_configs} '.format(**kwargs), env_vars={'OMP_NUM_THREADS': args.max_concurrency})
 
-            # Create georeferenced GeoTiff
-            geotiffcreated = False
-
             if reconstruction.is_georeferenced():
-                ulx = uly = lrx = lry = 0.0
-                with open(tree.odm_orthophoto_corners) as f:
-                    for lineNumber, line in enumerate(f):
-                        if lineNumber == 0:
-                            tokens = line.split(' ')
-                            if len(tokens) == 4:
-                                ulx = float(tokens[0]) + \
-                                    float(reconstruction.georef.utm_east_offset)
-                                lry = float(tokens[1]) + \
-                                    float(reconstruction.georef.utm_north_offset)
-                                lrx = float(tokens[2]) + \
-                                    float(reconstruction.georef.utm_east_offset)
-                                uly = float(tokens[3]) + \
-                                    float(reconstruction.georef.utm_north_offset)
-                log.ODM_INFO('Creating GeoTIFF')
-
-                orthophoto_vars = orthophoto.get_orthophoto_vars(args)
-
-                kwargs = {
-                    'ulx': ulx,
-                    'uly': uly,
-                    'lrx': lrx,
-                    'lry': lry,
-                    'vars': ' '.join(['-co %s=%s' % (k, orthophoto_vars[k]) for k in orthophoto_vars]),
-                    'proj': reconstruction.georef.proj4(),
-                    'input': tree.odm_orthophoto_render,
-                    'output': tree.odm_orthophoto_tif,
-                    'log': tree.odm_orthophoto_tif_log,
-                    'max_memory': get_max_memory(),
-                }
-
-                system.run('gdal_translate -a_ullr {ulx} {uly} {lrx} {lry} '
-                           '{vars} '
-                           '-a_srs \"{proj}\" '
-                           '--config GDAL_CACHEMAX {max_memory}% '
-                           '--config GDAL_TIFF_INTERNAL_MASK YES '
-                           '"{input}" "{output}" > "{log}"'.format(**kwargs))
-
                 bounds_file_path = os.path.join(tree.odm_georeferencing, 'odm_georeferenced_model.bounds.gpkg')
 
                 # Cutline computation, before cropping
                 # We want to use the full orthophoto, not the cropped one.
+                submodel_run = is_submodel(tree.opensfm)
                 if args.orthophoto_cutline:
                     cutline_file = os.path.join(tree.odm_orthophoto, "cutline.gpkg")
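For context, a sketch (offsets and corner values hypothetical) of what the removed gdal_translate path did: the first line of the corners file held "ulx lry lrx uly" in local coordinates, and adding the UTM offsets produced the absolute bounding box passed to -a_ullr.

east_offset, north_offset = 322263.0, 5157982.0  # hypothetical UTM offsets
corners_line = "-120.5 -80.25 130.75 95.5"       # hypothetical corners file content

ulx, lry, lrx, uly = (float(t) for t in corners_line.split(' '))
ulx += east_offset; lrx += east_offset
lry += north_offset; uly += north_offset

print('gdal_translate -a_ullr %s %s %s %s ...' % (ulx, uly, lrx, lry))
# -> gdal_translate -a_ullr 322142.5 5158077.5 322393.75 5157901.75 ...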
@@ -140,22 +124,24 @@ class ODMOrthoPhotoStage(types.ODM_Stage):
                                     cutline_file,
                                     args.max_concurrency,
                                     scale=0.25)
 
-                    orthophoto.compute_mask_raster(tree.odm_orthophoto_tif, cutline_file,
-                                                   os.path.join(tree.odm_orthophoto, "odm_orthophoto_cut.tif"),
-                                                   blend_distance=20, only_max_coords_feature=True)
+                    if submodel_run:
+                        orthophoto.compute_mask_raster(tree.odm_orthophoto_tif, cutline_file,
+                                                       os.path.join(tree.odm_orthophoto, "odm_orthophoto_cut.tif"),
+                                                       blend_distance=20, only_max_coords_feature=True)
+                    else:
+                        log.ODM_INFO("Not a submodel run, skipping mask raster generation")
 
-                orthophoto.post_orthophoto_steps(args, bounds_file_path, tree.odm_orthophoto_tif, tree.orthophoto_tiles)
+                orthophoto.post_orthophoto_steps(args, bounds_file_path, tree.odm_orthophoto_tif, tree.orthophoto_tiles, resolution)
 
                 # Generate feathered orthophoto also
-                if args.orthophoto_cutline:
+                if args.orthophoto_cutline and submodel_run:
                     orthophoto.feather_raster(tree.odm_orthophoto_tif,
                                               os.path.join(tree.odm_orthophoto, "odm_orthophoto_feathered.tif"),
                                               blend_distance=20
                                               )
 
-                geotiffcreated = True
-            if not geotiffcreated:
+            else:
                 if io.file_exists(tree.odm_orthophoto_render):
                     pseudogeo.add_pseudo_georeferencing(tree.odm_orthophoto_render)
                     log.ODM_INFO("Renaming %s --> %s" % (tree.odm_orthophoto_render, tree.odm_orthophoto_tif))
@@ -19,6 +19,7 @@ class ODMOpenMVSStage(types.ODM_Stage):
         reconstruction = outputs['reconstruction']
         photos = reconstruction.photos
         octx = OSFMContext(tree.opensfm)
+        pc_tile = False
 
         if not photos:
             raise system.ExitException('Not enough photos in photos array to start OpenMVS')
@@ -64,12 +65,13 @@ class ODMOpenMVSStage(types.ODM_Stage):
             filter_point_th = -20
 
         config = [
-            " --resolution-level %s" % int(resolution_level),
+            "--resolution-level %s" % int(resolution_level),
            '--dense-config-file "%s"' % densify_ini_file,
             "--max-resolution %s" % int(outputs['undist_image_max_size']),
             "--max-threads %s" % args.max_concurrency,
             "--number-views-fuse %s" % number_views_fuse,
+            "--sub-resolution-levels %s" % subres_levels,
             "--archive-type 3",
             '-w "%s"' % depthmaps_dir,
             "-v 0"
         ]
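For reference, a sketch (paths and values hypothetical) of the flat command line that the densify step assembles from this list; in the real run the numbers come from resolution_level, undist_image_max_size and friends:

config = [
    "--resolution-level 2",
    '--dense-config-file "/project/openmvs/config.ini"',
    "--max-resolution 2048",
    "--max-threads 8",
    "--number-views-fuse 2",
    "--sub-resolution-levels 2",
    "--archive-type 3",
    '-w "/project/openmvs/depthmaps"',
    "-v 0",
]
# system.run() receives a single string, e.g.:
print('"%s" "%s" %s' % ("DensifyPointCloud", "scene.mvs", ' '.join(config)))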
@@ -77,14 +79,10 @@ class ODMOpenMVSStage(types.ODM_Stage):
         gpu_config = []
         use_gpu = has_gpu(args)
         if use_gpu:
+            #gpu_config.append("--cuda-device -3")
             gpu_config.append("--cuda-device -1")
         else:
             gpu_config.append("--cuda-device -2")
 
-        if args.pc_tile:
-            config.append("--fusion-mode 1")
-
         extra_config = []
 
         if args.pc_skip_geometric:
@@ -96,12 +94,13 @@ class ODMOpenMVSStage(types.ODM_Stage):
             extra_config.append("--ignore-mask-label 0")
 
         with open(densify_ini_file, 'w+') as f:
-            f.write("Optimize = 7\n")
+            f.write("Optimize = 7\nMin Views Filter = 1\n")
 
         def run_densify():
             system.run('"%s" "%s" %s' % (context.omvs_densify_path,
                                          openmvs_scene_file,
                                          ' '.join(config + gpu_config + extra_config)))
 
         try:
             run_densify()
         except system.SubprocessException as e:
@@ -111,9 +110,9 @@ class ODMOpenMVSStage(types.ODM_Stage):
                 log.ODM_WARNING("OpenMVS failed with GPU, is your graphics card driver up to date? Falling back to CPU.")
                 gpu_config = ["--cuda-device -2"]
                 run_densify()
-            elif (e.errorCode == 137 or e.errorCode == 3221226505) and not args.pc_tile:
+            elif (e.errorCode == 137 or e.errorCode == 143 or e.errorCode == 3221226505) and not pc_tile:
                 log.ODM_WARNING("OpenMVS ran out of memory, we're going to turn on tiling to see if we can process this.")
-                args.pc_tile = True
+                pc_tile = True
                 config.append("--fusion-mode 1")
                 run_densify()
             else:
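A short illustration of why these exit codes signal an out-of-memory kill: on Linux a process terminated by a signal exits with 128 + signum, and 3221226505 is the Windows fatal-abort status.

import signal

# Linux-only attributes; illustrative values shown in the comments
print(128 + signal.SIGKILL)  # -> 137, the OOM killer's SIGKILL
print(128 + signal.SIGTERM)  # -> 143, a SIGTERM (newly handled above)
print(0xC0000409)            # -> 3221226505 (STATUS_STACK_BUFFER_OVERRUN on Windows)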
@@ -123,15 +122,15 @@ class ODMOpenMVSStage(types.ODM_Stage):
         files_to_remove = []
         scene_dense = os.path.join(tree.openmvs, 'scene_dense.mvs')
 
-        if args.pc_tile:
+        if pc_tile:
             log.ODM_INFO("Computing sub-scenes")
 
             subscene_densify_ini_file = os.path.join(tree.openmvs, 'subscene-config.ini')
             with open(subscene_densify_ini_file, 'w+') as f:
-                f.write("Optimize = 0\n")
+                f.write("Optimize = 0\nEstimation Geometric Iters = 0\nMin Views Filter = 1\n")
 
             config = [
-                "--sub-scene-area 660000",
+                "--sub-scene-area 660000", # 8000
                 "--max-threads %s" % args.max_concurrency,
                 '-w "%s"' % depthmaps_dir,
                 "-v 0",
@@ -162,9 +161,13 @@ class ODMOpenMVSStage(types.ODM_Stage):
                 config = [
                     '--resolution-level %s' % int(resolution_level),
                     '--max-resolution %s' % int(outputs['undist_image_max_size']),
+                    "--sub-resolution-levels %s" % subres_levels,
                     '--dense-config-file "%s"' % subscene_densify_ini_file,
                     '--number-views-fuse %s' % number_views_fuse,
                     '--max-threads %s' % args.max_concurrency,
+                    '--archive-type 3',
+                    '--postprocess-dmaps 0',
+                    '--geometric-iters 0',
                     '-w "%s"' % depthmaps_dir,
                     '-v 0',
                 ]
@@ -180,7 +183,7 @@ class ODMOpenMVSStage(types.ODM_Stage):
             else:
                 # Filter
                 if args.pc_filter > 0:
-                    system.run('"%s" "%s" --filter-point-cloud %s -v 0 %s' % (context.omvs_densify_path, scene_dense_mvs, filter_point_th, ' '.join(gpu_config)))
+                    system.run('"%s" "%s" --filter-point-cloud %s -v 0 --archive-type 3 %s' % (context.omvs_densify_path, scene_dense_mvs, filter_point_th, ' '.join(gpu_config)))
                 else:
                     # Just rename
                     log.ODM_INFO("Skipped filtering, %s --> %s" % (scene_ply_unfiltered, scene_ply))
@@ -220,7 +223,7 @@ class ODMOpenMVSStage(types.ODM_Stage):
             try:
                 system.run('"%s" %s' % (context.omvs_densify_path, ' '.join(config + gpu_config + extra_config)))
             except system.SubprocessException as e:
-                if e.errorCode == 137 or e.errorCode == 3221226505:
+                if e.errorCode == 137 or e.errorCode == 143 or e.errorCode == 3221226505:
                     log.ODM_WARNING("OpenMVS filtering ran out of memory, visibility checks will be skipped.")
                     skip_filtering()
                 else:
@@ -35,7 +35,7 @@ class ODMOpenSfMStage(types.ODM_Stage):
         octx.feature_matching(self.rerun())
         self.update_progress(30)
         octx.create_tracks(self.rerun())
-        octx.reconstruct(args.rolling_shutter, self.rerun())
+        octx.reconstruct(args.rolling_shutter, reconstruction.is_georeferenced() and (not args.sfm_no_partial), self.rerun())
         octx.extract_cameras(tree.path("cameras.json"), self.rerun())
         self.update_progress(70)
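How the new second argument evaluates at the call site is shown below; the OSFMContext.reconstruct() parameter it feeds is not named in this diff (something like a merge-partial-reconstructions flag, judging by --sfm-no-partial), so treat that mapping as an assumption.

# Truth table for the new argument: partial-reconstruction handling is only
# enabled when the scene is georeferenced and --sfm-no-partial is not set.
cases = [
    (True, False),   # georeferenced, flag unset -> True
    (True, True),    # georeferenced, flag set   -> False
    (False, False),  # not georeferenced         -> False
]
for is_georeferenced, sfm_no_partial in cases:
    print(is_georeferenced and (not sfm_no_partial))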
|
@ -132,7 +132,7 @@ class ODMSplitStage(types.ODM_Stage):
|
|||
log.ODM_INFO("Reconstructing %s" % sp)
|
||||
local_sp_octx = OSFMContext(sp)
|
||||
local_sp_octx.create_tracks(self.rerun())
|
||||
local_sp_octx.reconstruct(args.rolling_shutter, self.rerun())
|
||||
local_sp_octx.reconstruct(args.rolling_shutter, not args.sfm_no_partial, self.rerun())
|
||||
else:
|
||||
lre = LocalRemoteExecutor(args.sm_cluster, args.rolling_shutter, self.rerun())
|
||||
lre.set_projects([os.path.abspath(os.path.join(p, "..")) for p in submodel_paths])
|
||||
|
@@ -266,7 +266,7 @@ class ODMMergeStage(types.ODM_Stage):
 
                 orthophoto_vars = orthophoto.get_orthophoto_vars(args)
                 orthophoto.merge(all_orthos_and_ortho_cuts, tree.odm_orthophoto_tif, orthophoto_vars)
-                orthophoto.post_orthophoto_steps(args, merged_bounds_file, tree.odm_orthophoto_tif, tree.orthophoto_tiles)
+                orthophoto.post_orthophoto_steps(args, merged_bounds_file, tree.odm_orthophoto_tif, tree.orthophoto_tiles, args.orthophoto_resolution)
             elif len(all_orthos_and_ortho_cuts) == 1:
                 # Simply copy
                 log.ODM_WARNING("A single orthophoto/cutline pair was found between all submodels.")
@@ -306,7 +306,7 @@ class ODMMergeStage(types.ODM_Stage):
                     log.ODM_INFO("Created %s" % dem_file)
 
                     if args.tiles:
-                        generate_dem_tiles(dem_file, tree.path("%s_tiles" % human_name.lower()), args.max_concurrency)
+                        generate_dem_tiles(dem_file, tree.path("%s_tiles" % human_name.lower()), args.max_concurrency, args.dem_resolution)
 
                     if args.cog:
                         convert_to_cogeo(dem_file, max_workers=args.max_concurrency)
@@ -67,15 +67,15 @@ platform="Linux" # Assumed
 uname=$(uname)
 case $uname in
     "Darwin")
-        platform="MacOS / OSX"
+        platform="MacOS"
         ;;
     MINGW*)
         platform="Windows"
         ;;
 esac
 
-if [[ $platform != "Linux" ]]; then
-    echo "This script only works on Linux."
+if [[ $platform != "Linux" && $platform != "MacOS" ]]; then
+    echo "This script only works on Linux and MacOS."
     exit 1
 fi
@@ -15,6 +15,7 @@ set GDAL_DRIVER_PATH=%GDALBASE%\gdalplugins
 set OSFMBASE=%ODMBASE%SuperBuild\install\bin\opensfm\bin
 set SBBIN=%ODMBASE%SuperBuild\install\bin
 set PDAL_DRIVER_PATH=%ODMBASE%SuperBuild\install\bin
+set PYTHONPYCACHEPREFIX=%PROGRAMDATA%\ODM\pycache
 
 set PATH=%GDALBASE%;%SBBIN%;%OSFMBASE%
 set PROJ_LIB=%GDALBASE%\data\proj

@@ -23,14 +24,6 @@ set VIRTUAL_ENV=%ODMBASE%venv
 set PYTHONPATH=%VIRTUAL_ENV%
 set PYENVCFG=%VIRTUAL_ENV%\pyvenv.cfg
 
-rem Hot-patching pyvenv.cfg
-echo home = %ODMBASE%\python38> "%PYENVCFG%"
-echo include-system-site-packages = false>> "%PYENVCFG%"
-
-rem Hot-patching cv2 extension configs
-echo BINARIES_PATHS = [r"%SBBIN%"] + BINARIES_PATHS> venv\Lib\site-packages\cv2\config.py
-echo PYTHON_EXTENSIONS_PATHS = [r'''%VIRTUAL_ENV%\lib\site-packages\cv2\python-3.8'''] + PYTHON_EXTENSIONS_PATHS> venv\Lib\site-packages\cv2\config-3.8.py
-
 if not defined PROMPT set PROMPT=$P$G
 
 if defined _OLD_VIRTUAL_PROMPT set PROMPT=%_OLD_VIRTUAL_PROMPT%
@@ -0,0 +1,14 @@
+set ODMBASE=%~dp0
+set VIRTUAL_ENV=%ODMBASE%venv
+set PYENVCFG=%VIRTUAL_ENV%\pyvenv.cfg
+set SBBIN=%ODMBASE%SuperBuild\install\bin
+
+rem Hot-patching pyvenv.cfg
+echo home = %ODMBASE%venv\Scripts> "%PYENVCFG%"
+echo include-system-site-packages = false>> "%PYENVCFG%"
+
+rem Hot-patching cv2 extension configs
+echo BINARIES_PATHS = [r"%SBBIN%"] + BINARIES_PATHS> venv\Lib\site-packages\cv2\config.py
+echo PYTHON_EXTENSIONS_PATHS = [r'''%VIRTUAL_ENV%\lib\site-packages\cv2\python-3.8'''] + PYTHON_EXTENSIONS_PATHS> venv\Lib\site-packages\cv2\config-3.8.py
+
+cls
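A brief sketch of why the hot-patch above rewrites pyvenv.cfg: the "home" key tells the venv which base interpreter directory to bootstrap from, so it must point at wherever ODM was unpacked. The path below is hypothetical.

from pathlib import Path

cfg = Path("venv") / "pyvenv.cfg"  # hypothetical location of the patched file
for line in cfg.read_text().splitlines():
    key, _, value = line.partition(" = ")
    print(key, "->", value)
# -> home -> C:\ODM\venv\Scripts (or wherever the install lives)
# -> include-system-site-packages -> false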