file_path | content
---|---
gazebosim/gz-omni/PACKAGE-LICENSES/omni-config-cpp-LICENSE.txt |
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
|
gazebosim/gz-omni/PACKAGE-LICENSES/zstd-LICENSE.txt | BSD License
For Zstandard software
Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name Facebook nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
gazebosim/gz-omni/PACKAGE-LICENSES/openexr-LICENSE.txt | Copyright (c) 2006, Industrial Light & Magic, a division of Lucasfilm
Entertainment Company Ltd. Portions contributed and copyright held by
others as indicated. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above
copyright notice, this list of conditions and the following
disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with
the distribution.
* Neither the name of Industrial Light & Magic nor the names of
any other contributors to this software may be used to endorse or
promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
gazebosim/gz-omni/PACKAGE-LICENSES/glew-LICENSE.md | The OpenGL Extension Wrangler Library
Copyright (C) 2002-2007, Milan Ikits <milan ikits[]ieee org>
Copyright (C) 2002-2007, Marcelo E. Magallon <mmagallo[]debian org>
Copyright (C) 2002, Lev Povalahev
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* The name of the author may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
Mesa 3-D graphics library
Version: 7.0
Copyright (C) 1999-2007 Brian Paul All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Copyright (c) 2007 The Khronos Group Inc.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and/or associated documentation files (the
"Materials"), to deal in the Materials without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Materials, and to
permit persons to whom the Materials are furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Materials.
THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
|
gazebosim/gz-omni/PACKAGE-LICENSES/brotli-LICENSE.txt | Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
|
gazebosim/gz-omni/PACKAGE-LICENSES/cpp-httplib-LICENSE.txt | The MIT License (MIT)
Copyright (c) 2017 yhirose
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
gazebosim/gz-omni/PACKAGE-LICENSES/gli-LICENSE.txt | ================================================================================
OpenGL Image (GLI)
--------------------------------------------------------------------------------
GLM can be distributed and/or modified under the terms of either
a) The Happy Bunny License, or b) the MIT License.
================================================================================
The Happy Bunny License (Modified MIT License)
--------------------------------------------------------------------------------
Copyright (c) 2010 - 2016 G-Truc Creation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
Restrictions: By making use of the Software for military purposes, you choose
to make a Bunny unhappy.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
================================================================================
The MIT License
--------------------------------------------------------------------------------
Copyright (c) 2010 - 2016 G-Truc Creation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
|
gazebosim/gz-omni/PACKAGE-LICENSES/libstdc++-LICENSE.txt | GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
GCC RUNTIME LIBRARY EXCEPTION
Version 3.1, 31 March 2009
Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
This GCC Runtime Library Exception ("Exception") is an additional
permission under section 7 of the GNU General Public License, version
3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
bears a notice placed by the copyright holder of the file stating that
the file is governed by GPLv3 along with this Exception.
When you use GCC to compile a program, GCC may combine portions of
certain GCC header files and runtime libraries with the compiled
program. The purpose of this Exception is to allow compilation of
non-GPL (including proprietary) programs to use, in this way, the
header files and runtime libraries covered by this Exception.
0. Definitions.
A file is an "Independent Module" if it either requires the Runtime
Library for execution after a Compilation Process, or makes use of an
interface provided by the Runtime Library, but is not otherwise based
on the Runtime Library.
"GCC" means a version of the GNU Compiler Collection, with or without
modifications, governed by version 3 (or a specified later version) of
the GNU General Public License (GPL) with the option of using any
subsequent versions published by the FSF.
"GPL-compatible Software" is software whose conditions of propagation,
modification and use would permit combination with GCC in accord with
the license of GCC.
"Target Code" refers to output from any compiler for a real or virtual
target processor architecture, in executable form or suitable for
input to an assembler, loader, linker and/or execution
phase. Notwithstanding that, Target Code does not include data in any
format that is used as a compiler intermediate representation, or used
for producing a compiler intermediate representation.
The "Compilation Process" transforms code entirely represented in
non-intermediate languages designed for human-written code, and/or in
Java Virtual Machine byte code, into Target Code. Thus, for example,
use of source code generators and preprocessors need not be considered
part of the Compilation Process, since the Compilation Process can be
understood as starting with the output of the generators or
preprocessors.
A Compilation Process is "Eligible" if it is done using GCC, alone or
with other GPL-compatible software, or if it is done without using any
work based on GCC. For example, using non-GPL-compatible Software to
optimize any GCC intermediate representations would not qualify as an
Eligible Compilation Process.
1. Grant of Additional Permission.
You have permission to propagate a work of Target Code formed by
combining the Runtime Library with Independent Modules, even if such
propagation would otherwise violate the terms of GPLv3, provided that
all Target Code was generated by Eligible Compilation Processes. You
may then convey such a combination under terms of your choice,
consistent with the licensing of the Independent Modules.
2. No Weakening of GCC Copyleft.
The availability of this Exception does not imply any general
presumption that third-party software is unaffected by the copyleft
requirements of the license of GCC.
|
gazebosim/gz-omni/PACKAGE-LICENSES/openexr-LICENSE.md | Copyright (c) 2006, Industrial Light & Magic, a division of Lucasfilm
Entertainment Company Ltd. Portions contributed and copyright held by
others as indicated. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above
copyright notice, this list of conditions and the following
disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with
the distribution.
* Neither the name of Industrial Light & Magic nor the names of
any other contributors to this software may be used to endorse or
promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
gazebosim/gz-omni/PACKAGE-LICENSES/libopus-LICENSE.txt | Copyright 2001-2011 Xiph.Org, Skype Limited, Octasic,
Jean-Marc Valin, Timothy B. Terriberry,
CSIRO, Gregory Maxwell, Mark Borgerding,
Erik de Castro Lopo
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Internet Society, IETF or IETF Trust, nor the
names of specific contributors, may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Opus is subject to the royalty-free patent licenses which are
specified at:
Xiph.Org Foundation:
https://datatracker.ietf.org/ipr/1524/
Microsoft Corporation:
https://datatracker.ietf.org/ipr/1914/
Broadcom Corporation:
https://datatracker.ietf.org/ipr/1526/
|
gazebosim/gz-omni/PACKAGE-LICENSES/doctest-LICENSE.txt | The MIT License (MIT)
Copyright (c) 2016-2021 Viktor Kirilov
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
gazebosim/gz-omni/PACKAGE-LICENSES/omniverse.auth.client.c-LICENSE.txt | Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
|
gazebosim/gz-omni/PACKAGE-LICENSES/idl.cpp-LICENSE.txt | Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
|
gazebosim/gz-omni/PACKAGE-LICENSES/omniverse_connection.linux-x86_64-release-LICENSE.txt | Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
|
gazebosim/gz-omni/source/ignition_live/ThreadSafe.hpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef IGNITION_OMNIVERSE_THREADSAFE_HPP
#define IGNITION_OMNIVERSE_THREADSAFE_HPP
#include <mutex>
#include <utility>
namespace ignition::omniverse
{
/// \brief Make an object threadsafe by locking it behind a mutex.
template <typename T, typename MutexT = std::recursive_mutex>
class ThreadSafe
{
public:
class Ref
{
public:
Ref(T& _data, MutexT& _m);
~Ref();
// don't allow copying and moving
Ref(const Ref&) = delete;
Ref(Ref&&) = delete;
Ref& operator=(const Ref&) = delete;
T& operator*();
// Returns a reference, not a pointer: when operator-> yields a class type,
// the compiler re-applies operator-> to the result, which lets member
// access drill through smart-pointer types such as pxr::UsdStageRefPtr.
T& operator->();
private:
MutexT& m;
T& data;
};
/// \brief Takes ownership of the data.
explicit ThreadSafe(T&& _data);
// don't allow copying
ThreadSafe(const ThreadSafe&) = delete;
ThreadSafe& operator=(const ThreadSafe&) = delete;
// moving is ok, but a mutex itself is never movable, so the moved-to
// object constructs a fresh mutex of its own
ThreadSafe(ThreadSafe&& _other) : data(std::move(_other.data)) {}
/// \brief Locks the mutex
Ref Lock();
private:
T data;
MutexT mutex;
};
template <typename T, typename MutexT>
ThreadSafe<T, MutexT>::Ref::Ref(T& _data, MutexT& _m) : data(_data), m(_m)
{
this->m.lock();
}
template <typename T, typename MutexT>
ThreadSafe<T, MutexT>::Ref::~Ref()
{
this->m.unlock();
}
template <typename T, typename MutexT>
T& ThreadSafe<T, MutexT>::Ref::operator*()
{
return this->data;
}
template <typename T, typename MutexT>
T& ThreadSafe<T, MutexT>::Ref::operator->()
{
return this->data;
}
template <typename T, typename MutexT>
ThreadSafe<T, MutexT>::ThreadSafe(T&& _data) : data(std::move(_data))
{
}
template <typename T, typename MutexT>
typename ThreadSafe<T, MutexT>::Ref ThreadSafe<T, MutexT>::Lock()
{
return Ref(this->data, this->mutex);
}
} // namespace ignition::omniverse
#endif
|
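A minimal usage sketch of ThreadSafe (the `numbers` global and `Append` function are illustrative, not part of gz-omni): the Ref returned by Lock() keeps the mutex held for its entire lifetime, so every access made through it is serialized.
#include <vector>
#include "ThreadSafe.hpp"

// Illustrative guarded global; any type works, not just containers.
static ignition::omniverse::ThreadSafe<std::vector<int>> numbers(
    std::vector<int>{});

void Append(int _value)
{
  auto ref = numbers.Lock();  // mutex acquired here
  (*ref).push_back(_value);   // operator* yields the guarded object
}                             // mutex released when `ref` goes out of scope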
gazebosim/gz-omni/source/ignition_live/SetOp.hpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef IGNITION_OMNIVERSE_SET_OP_HPP
#define IGNITION_OMNIVERSE_SET_OP_HPP
#include <pxr/base/gf/vec3f.h>
#include <pxr/usd/usdGeom/xform.h>
namespace ignition
{
namespace omniverse
{
// A utility class to set the position, rotation, or scale values
class SetOp
{
public:
SetOp(pxr::UsdGeomXformable& xForm, pxr::UsdGeomXformOp& op,
pxr::UsdGeomXformOp::Type opType, const pxr::GfVec3d& value,
const pxr::UsdGeomXformOp::Precision precision)
{
if (!op)
{
op = xForm.AddXformOp(opType, precision);
}
if (op.GetPrecision() == pxr::UsdGeomXformOp::Precision::PrecisionFloat)
op.Set(pxr::GfVec3f(value));
else
op.Set(value);
}
};
} // namespace omniverse
} // namespace ignition
#endif
|
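A short hedged sketch of SetOp in use, assuming an existing stage with a prim at /World/box (an illustrative path): passing a default-constructed, invalid UsdGeomXformOp makes SetOp author a new op of the requested type before setting the value.
// Sketch only: author a translate op on an existing prim.
void TranslateBox(pxr::UsdStageRefPtr _stage)
{
  pxr::UsdGeomXformable xForm(
      _stage->GetPrimAtPath(pxr::SdfPath("/World/box")));
  pxr::UsdGeomXformOp translateOp;  // invalid; SetOp will create the op
  ignition::omniverse::SetOp setTranslate(
      xForm, translateOp, pxr::UsdGeomXformOp::TypeTranslate,
      pxr::GfVec3d(1.0, 2.0, 3.0), pxr::UsdGeomXformOp::PrecisionDouble);
}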
gazebosim/gz-omni/source/ignition_live/OmniClientpp.cpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "OmniClientpp.hpp"
#include <ignition/common/Console.hh>
#include <OmniClient.h>
namespace ignition::omniverse
{
OmniverseLock::OmniverseLock(const std::string& _url) : url(_url)
{
omniClientLock(this->url.c_str(), nullptr, nullptr);
}
OmniverseLock::~OmniverseLock()
{
omniClientUnlock(this->url.c_str(), nullptr, nullptr);
}
bool CheckClientResult(OmniClientResult result)
{
return result == eOmniClientResult_Ok || result == eOmniClientResult_OkLatest;
}
OmniverseSync::MaybeError<OmniClientListEntry> OmniverseSync::Stat(
const std::string& url) noexcept
{
MaybeError<OmniClientListEntry> ret(eOmniClientResult_Error);
omniClientWait(omniClientStat(
url.c_str(), &ret,
[](void* userData, OmniClientResult clientResult,
const OmniClientListEntry* entry) noexcept
{
auto* ret =
reinterpret_cast<MaybeError<OmniClientListEntry>*>(userData);
if (!CheckClientResult(clientResult))
{
*ret = clientResult;
return;
}
*ret = *entry;
}));
return ret;
}
} // namespace ignition::omniverse
|
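A hedged sketch combining Stat() with the RAII lock, using an illustrative URL; the modifiedBy field of OmniClientListEntry is the same one the connect code logs elsewhere in this repo.
// Sketch only: stat a URL and hold an exclusive lock while inspecting it.
void StatAndLock(const std::string &_url)
{
  auto entry = ignition::omniverse::OmniverseSync::Stat(_url);
  if (!entry)
  {
    ignerr << omniClientGetResultString(entry.Error()) << std::endl;
    return;
  }
  // RAII: `_url` stays locked until `lock` goes out of scope.
  ignition::omniverse::OmniverseLock lock(_url);
  ignmsg << "last modified by: " << entry.Value().modifiedBy << std::endl;
}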
gazebosim/gz-omni/source/ignition_live/OmniverseConnect.hpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef IGNITION_OMNIVERSE_CONNECT_HPP
#define IGNITION_OMNIVERSE_CONNECT_HPP
#include "OmniClientpp.hpp"
#include <pxr/usd/usd/stage.h>
#include <OmniClient.h>
#include <OmniUsdLive.h>
#include <mutex>
#include <string>
// Global for making the logging reasonable
static std::mutex gLogMutex;
namespace ignition::omniverse
{
static std::string normalizedStageUrl;
// Stage URL really only needs to contain the server in the URL. eg.
// omniverse://ov-prod
void PrintConnectedUsername(const std::string& stageUrl);
/// \brief Creates a new ignition stage in omniverse, does nothing if the
/// stage already exists.
/// \details The new stage is authored with ignition metadata.
/// \return The url of the stage
MaybeError<std::string, GenericError> CreateOmniverseModel(
const std::string& destinationPath);
void CheckpointFile(const char* stageUrl, const char* comment);
// Startup Omniverse
bool StartOmniverse();
} // namespace ignition::omniverse
#endif
|
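A minimal startup sketch built only from the declarations above (the URL and function name are illustrative): StartOmniverse() must succeed before any stage can be created, and CreateOmniverseModel() reports failures through MaybeError.
// Sketch only; assumes ignition/common/Console.hh for the log streams.
void ConnectAndReport()
{
  if (!ignition::omniverse::StartOmniverse())
  {
    ignerr << "Failed to initialize the Omniverse client library" << std::endl;
    return;
  }
  auto stageUrl = ignition::omniverse::CreateOmniverseModel(
      "omniverse://localhost/Users/test/ignition.usd");  // illustrative URL
  if (!stageUrl)
  {
    ignerr << stageUrl.Error() << std::endl;
    return;
  }
  ignition::omniverse::PrintConnectedUsername(stageUrl.Value());
}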
gazebosim/gz-omni/source/ignition_live/GetOp.hpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef IGNITION_OMNIVERSE_GET_OP_HPP
#define IGNITION_OMNIVERSE_GET_OP_HPP
#include <pxr/base/gf/vec3f.h>
#include <pxr/base/gf/quatf.h>
#include <pxr/usd/usdGeom/xform.h>
#include <pxr/usd/usdGeom/xformCommonAPI.h>
#include <ignition/math/Angle.hh>
#include <ignition/math/Quaternion.hh>
namespace ignition
{
namespace omniverse
{
// A utility class to get the position, rotation, or scale values
class GetOp
{
public:
GetOp(pxr::UsdGeomXformable& xForm)
{
this->position = pxr::GfVec3d(0);
this->rotXYZ = pxr::GfVec3f(0);
this->scale = pxr::GfVec3f(1);
ignition::math::Quaterniond orientQuat;
bool resetXformStack = false;
std::vector<pxr::UsdGeomXformOp> xFormOps =
xForm.GetOrderedXformOps(&resetXformStack);
// Get the current xform op values
for (size_t i = 0; i < xFormOps.size(); i++)
{
switch (xFormOps[i].GetOpType())
{
case pxr::UsdGeomXformOp::TypeTranslate:
translateOp = xFormOps[i];
translateOp.Get(&this->position);
break;
case pxr::UsdGeomXformOp::TypeRotateXYZ:
rotateOp = xFormOps[i];
rotateOp.Get(&this->rotXYZ);
break;
case pxr::UsdGeomXformOp::TypeOrient:
rotateOp = xFormOps[i];
rotateOp.Get(&this->rotQ);
orientQuat = ignition::math::Quaterniond(
this->rotQ.GetReal(),
this->rotQ.GetImaginary()[0],
this->rotQ.GetImaginary()[1],
this->rotQ.GetImaginary()[2]);
// USD rotateXYZ ops are authored in degrees, while Roll/Pitch/Yaw
// return radians, so convert before storing.
this->rotXYZ = pxr::GfVec3f(
ignition::math::Angle(orientQuat.Roll()).Degree(),
ignition::math::Angle(orientQuat.Pitch()).Degree(),
ignition::math::Angle(orientQuat.Yaw()).Degree());
break;
case pxr::UsdGeomXformOp::TypeScale:
scaleOp = xFormOps[i];
scaleOp.Get(&this->scale);
break;
default:
// Ignore xform op types this utility does not track.
break;
}
}
}
// Define storage for the different xform ops that Omniverse Kit likes to use
pxr::UsdGeomXformOp translateOp;
pxr::UsdGeomXformOp rotateOp;
pxr::UsdGeomXformOp scaleOp;
pxr::GfVec3d position;
pxr::GfVec3f rotXYZ;
pxr::GfVec3f scale;
pxr::GfQuatf rotQ;
};
} // namespace omniverse
} // namespace ignition
#endif
|
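A short hedged sketch reading back a transform, assuming an existing stage and an illustrative prim path; with the degree conversion above, rotXYZ is uniformly in degrees regardless of whether the prim authored a rotateXYZ or an orient op.
// Sketch only: read the authored transform from a prim.
void PrintBoxTransform(pxr::UsdStageRefPtr _stage)
{
  pxr::UsdGeomXformable xForm(
      _stage->GetPrimAtPath(pxr::SdfPath("/World/box")));
  ignition::omniverse::GetOp op(xForm);
  igndbg << "position: " << op.position[0] << " " << op.position[1] << " "
         << op.position[2] << std::endl;
  igndbg << "rotXYZ (degrees): " << op.rotXYZ[0] << " " << op.rotXYZ[1]
         << " " << op.rotXYZ[2] << std::endl;
}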
gazebosim/gz-omni/source/ignition_live/Error.hpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef IGNITION_OMNIVERSE_ERROR_HPP
#define IGNITION_OMNIVERSE_ERROR_HPP
#include <ostream>
#include <string>
#include <variant>
namespace ignition::omniverse
{
class GenericError
{
public:
std::string message;
explicit GenericError(const std::string& _message) : message(_message) {}
friend std::ostream& operator<<(std::ostream& _output,
const GenericError& _error)
{
_output << _error.message;
return _output;
}
};
/// \brief Represents the result of a function which may contain an error.
template <typename T, typename ErrorT>
class MaybeError
{
public:
// allow implicit conversion
MaybeError(const T& _val) : data(_val) {}
MaybeError(const ErrorT& _error) : data(_error) {}
/// \brief `true` if there is no error
explicit operator bool() const
{
return !std::holds_alternative<ErrorT>(this->data);
}
/// \brief Get the value of the result, throws if there is an error.
const T& Value() const { return std::get<T>(this->data); }
/// \brief Get the error, throws if there is no error.
const ErrorT& Error() const { return std::get<ErrorT>(this->data); }
private:
std::variant<T, ErrorT> data;
};
} // namespace ignition::omniverse
#endif
|
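A minimal sketch of MaybeError as a lightweight result type (ParsePositive and Report are illustrative): the success and error alternatives both convert implicitly, and operator bool selects the branch.
// Sketch only; assumes ignition/common/Console.hh for the log streams.
ignition::omniverse::MaybeError<int, ignition::omniverse::GenericError>
ParsePositive(int _value)
{
  if (_value < 0)
    return ignition::omniverse::GenericError("value must be non-negative");
  return _value;  // implicit conversion to the success alternative
}

void Report(int _raw)
{
  auto result = ParsePositive(_raw);
  if (result)
    igndbg << "parsed: " << result.Value() << std::endl;
  else
    ignerr << result.Error() << std::endl;
}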
gazebosim/gz-omni/source/ignition_live/FUSDLayerNoticeListener.cpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "FUSDLayerNoticeListener.hpp"
#include <memory>
#include <string>
#include <ignition/transport/Node.hh>
namespace ignition
{
namespace omniverse
{
class FUSDLayerNoticeListener::Implementation
{
public:
std::shared_ptr<ThreadSafe<pxr::UsdStageRefPtr>> stage;
std::string worldName;
ignition::transport::Node node;
};
FUSDLayerNoticeListener::FUSDLayerNoticeListener(
std::shared_ptr<ThreadSafe<pxr::UsdStageRefPtr>> &_stage,
const std::string& _worldName)
: dataPtr(ignition::utils::MakeUniqueImpl<Implementation>())
{
this->dataPtr->stage = _stage;
this->dataPtr->worldName = _worldName;
}
void FUSDLayerNoticeListener::HandleGlobalLayerReload(
const pxr::SdfNotice::LayerDidReloadContent& n)
{
igndbg << "HandleGlobalLayerReload called" << std::endl;
}
// Print some interesting info about the LayerNotice
void FUSDLayerNoticeListener::HandleRootOrSubLayerChange(
const class pxr::SdfNotice::LayersDidChangeSentPerLayer& _layerNotice,
const pxr::TfWeakPtr<pxr::SdfLayer>& _sender)
{
auto iter = _layerNotice.find(_sender);
if (iter == _layerNotice.end())
{
return;
}
for (auto & changeEntry : iter->second.GetEntryList())
{
const pxr::SdfPath& sdfPath = changeEntry.first;
if (changeEntry.second.flags.didRemoveNonInertPrim)
{
ignition::msgs::Entity req;
req.set_name(sdfPath.GetName());
req.set_type(ignition::msgs::Entity::MODEL);
ignition::msgs::Boolean rep;
bool result;
unsigned int timeout = 5000;
bool executed = this->dataPtr->node.Request(
"/world/" + this->dataPtr->worldName + "/remove",
req, timeout, rep, result);
if (executed)
{
if (rep.data())
{
igndbg << "Model was removed [" << sdfPath.GetName() << "]"
<< std::endl;
this->dataPtr->stage->Lock()->RemovePrim(sdfPath);
}
else
{
ignerr << "Error model was not removed [" << sdfPath.GetName()
<< "]" << std::endl;
}
}
ignmsg << "Deleted " << sdfPath.GetName() << std::endl;
}
else if (changeEntry.second.flags.didAddNonInertPrim)
{
ignmsg << "Added" << sdfPath.GetName() << std::endl;
}
}
}
} // namespace omniverse
} // namespace ignition
|
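A hedged wiring sketch, assuming `_stage` and `_worldName` already exist and that FUSDLayerNoticeListener derives from pxr::TfWeakBase, which pxr::TfNotice::Register requires for its weak-pointer listener argument; in real code the listener must outlive the subscription.
// Sketch only: subscribe the listener to layer-reload notices.
void WatchLayers(
    std::shared_ptr<ignition::omniverse::ThreadSafe<pxr::UsdStageRefPtr>>
        &_stage,
    const std::string &_worldName)
{
  auto listener =
      std::make_shared<ignition::omniverse::FUSDLayerNoticeListener>(
          _stage, _worldName);
  pxr::TfNotice::Register(
      pxr::TfCreateWeakPtr(listener.get()),
      &ignition::omniverse::FUSDLayerNoticeListener::HandleGlobalLayerReload);
}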
gazebosim/gz-omni/source/ignition_live/OmniverseConnect.cpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "OmniverseConnect.hpp"
#include <ignition/common/Console.hh>
#include <pxr/base/gf/vec3f.h>
#include <pxr/usd/usd/notice.h>
#include <pxr/usd/usd/stage.h>
#include <pxr/usd/usd/primRange.h>
#include <pxr/usd/usdGeom/cylinder.h>
#include <pxr/usd/usdGeom/metrics.h>
#include <pxr/usd/usdGeom/xform.h>
#include <iostream>
#include <mutex>
#include <string>
#include <vector>
namespace ignition
{
namespace omniverse
{
// Stage URL really only needs to contain the server in the URL. eg.
// omniverse://ov-prod
void PrintConnectedUsername(const std::string& stageUrl)
{
// Get the username for the connection
std::string userName("_none_");
omniClientWait(omniClientGetServerInfo(
stageUrl.c_str(), &userName,
[](void* userData, OmniClientResult result,
struct OmniClientServerInfo const* info) noexcept
{
std::string* userName = static_cast<std::string*>(userData);
if (userData && userName && info && info->username)
{
userName->assign(info->username);
}
}));
{
std::unique_lock<std::mutex> lk(gLogMutex);
ignmsg << "Connected username: " << userName << std::endl;
}
}
// This struct is context for the omniClientStatSubscribe() callbacks
struct StatSubscribeContext
{
std::string* stageUrlPtr;
pxr::UsdStageRefPtr stage;
};
// Called immediately due to the stat subscribe function
static void clientStatCallback(void* userData, OmniClientResult result,
struct OmniClientListEntry const* entry) noexcept
{
StatSubscribeContext* context = static_cast<StatSubscribeContext*>(userData);
if (result != OmniClientResult::eOmniClientResult_Ok)
{
ignerr << "stage not found: " << *context->stageUrlPtr << std::endl;
exit(1);
}
}
// Called due to the stat subscribe function when the file is updated
static void clientStatSubscribeCallback(
void* userData, OmniClientResult result, OmniClientListEvent listEvent,
struct OmniClientListEntry const* entry) noexcept
{
StatSubscribeContext* context = static_cast<StatSubscribeContext*>(userData);
switch (listEvent)
{
case eOmniClientListEvent_Updated:
{
ignmsg << "Updated - user: " << entry->modifiedBy
<< " version: " << entry->version << std::endl;
// Mark the last updated time
// *context->lastUpdatedTimePtr = std::time(0);
break;
}
case eOmniClientListEvent_Created:
ignmsg << "Created: " << entry->createdBy << std::endl;
break;
case eOmniClientListEvent_Deleted:
ignmsg << "Deleted: " << entry->createdBy << std::endl;
exit(1);
break;
case eOmniClientListEvent_Locked:
ignmsg << "Locked: " << entry->createdBy << std::endl;
break;
default:
break;
}
}
MaybeError<std::string, GenericError> CreateOmniverseModel(
const std::string& destinationPath)
{
std::string stageUrl = destinationPath;
// Normalize the URL because the omniUsdLiveSetModeForUrl() interface keys off
// of the _normalized_ stage path. The first call reports the required buffer
// size; the second call fills the buffer and returns a pointer to it.
size_t bufferSize = 0;
omniClientNormalizeUrl(stageUrl.c_str(), nullptr, &bufferSize);
std::vector<char> normalizedStageBuffer(bufferSize);
std::string normalizedStageUrl = omniClientNormalizeUrl(
stageUrl.c_str(), normalizedStageBuffer.data(), &bufferSize);
// according to usd docs, `UsdStage::Open` should auto create a new stage
// if the path doesn't exist, but this doesn't work in omniverse for
// some reason. So we check if the path exist and use `UsdStage::CreateNew`,
// if it does not.
auto entry = OmniverseSync::Stat(normalizedStageUrl);
if (!entry)
{
if (entry.Error() != eOmniClientResult_ErrorNotFound)
{
auto errString = omniClientGetResultString(entry.Error());
return GenericError("Failure to create stage in Omniverse (" +
std::string(errString) + ")");
}
else
{
auto stage = pxr::UsdStage::CreateNew(normalizedStageUrl);
if (!stage)
{
return GenericError("Failed to create stage [" + normalizedStageUrl + "]");
}
// Specify ignition up-ness and units.
pxr::UsdGeomSetStageUpAxis(stage, pxr::UsdGeomTokens->z);
pxr::UsdGeomSetStageMetersPerUnit(stage, 1);
stage->SetMetadata(pxr::SdfFieldKeys->Comment,
"Created by ignition-omniverse");
stage->Save();
ignmsg << "Created omniverse stage at [" << normalizedStageUrl << "]"
<< std::endl;
}
}
return normalizedStageUrl;
}
void CheckpointFile(const char* stageUrl, const char* comment)
{
bool bCheckpointsSupported = false;
omniClientWait(omniClientGetServerInfo(
stageUrl, &bCheckpointsSupported,
[](void* UserData, OmniClientResult Result,
OmniClientServerInfo const* Info) noexcept
{
if (Result == eOmniClientResult_Ok && Info && UserData)
{
bool* bCheckpointsSupported = static_cast<bool*>(UserData);
*bCheckpointsSupported = Info->checkpointsEnabled;
}
}));
if (bCheckpointsSupported)
{
const bool bForceCheckpoint = true;
omniClientWait(omniClientCreateCheckpoint(
stageUrl, comment, bForceCheckpoint, nullptr,
[](void* userData, OmniClientResult result,
char const* checkpointQuery) noexcept {}));
}
}
// Startup Omniverse
bool StartOmniverse()
{
// Register a function to be called whenever the library wants to print
// something to a log
omniClientSetLogCallback(
[](char const* threadName, char const* component,
OmniClientLogLevel level, char const* message) noexcept
{
std::unique_lock<std::mutex> lk(gLogMutex);
switch (level)
{
case eOmniClientLogLevel_Debug:
case eOmniClientLogLevel_Verbose:
igndbg << "(" << component << ") " << message << std::endl;
break;
case eOmniClientLogLevel_Info:
ignmsg << "(" << component << ") " << message << std::endl;
break;
case eOmniClientLogLevel_Warning:
ignwarn << "(" << component << ") " << message << std::endl;
break;
case eOmniClientLogLevel_Error:
ignerr << "(" << component << ") " << message << std::endl;
break;
default:
igndbg << "(" << component << ") " << message << std::endl;
}
});
// The default log level is "Info", set it to "Debug" to see all messages
omniClientSetLogLevel(eOmniClientLogLevel_Info);
// Initialize the library and pass it the version constant defined in
// OmniClient.h This allows the library to verify it was built with a
// compatible version. It will return false if there is a version mismatch.
if (!omniClientInitialize(kOmniClientVersion))
{
return false;
}
omniClientRegisterConnectionStatusCallback(
nullptr,
[](void* userData, const char* url,
OmniClientConnectionStatus status) noexcept
{
std::unique_lock<std::mutex> lk(gLogMutex);
ignmsg << "Connection Status: "
<< omniClientGetConnectionStatusString(status) << " [" << url
<< "]" << std::endl;
if (status == eOmniClientConnectionStatus_ConnectError)
{
// We shouldn't just exit here - we should clean up a bit, but we're
// going to do it anyway
ignerr << "Failed connection, exiting." << std::endl;
exit(-1);
}
});
// Enable live updates
omniUsdLiveSetDefaultEnabled(true);
return true;
}
} // namespace omniverse
} // namespace ignition
|
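A short hedged sketch of CheckpointFile with an illustrative URL and comment string; the function itself probes the server first and silently does nothing when checkpoints are unsupported.
// Sketch only: record a named checkpoint after saving the stage.
void SaveNamedCheckpoint(const std::string &_stageUrl)
{
  ignition::omniverse::CheckpointFile(
      _stageUrl.c_str(), "Initial import from Ignition");
}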
gazebosim/gz-omni/source/ignition_live/Material.hpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef IGNITION_OMNIVERSE_MATERIAL_HPP
#define IGNITION_OMNIVERSE_MATERIAL_HPP
#include <ignition/msgs/visual.pb.h>
#include <pxr/usd/usd/stage.h>
#include <pxr/usd/usdGeom/gprim.h>
#include <pxr/usd/usdShade/material.h>
namespace ignition
{
namespace omniverse
{
bool SetMaterial(const pxr::UsdGeomGprim& _gprim,
const ignition::msgs::Visual& _visualMsg,
const pxr::UsdStageRefPtr& _stage,
const std::string& _stageDirUrl);
} // namespace omniverse
} // namespace ignition
#endif
|
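A minimal hedged sketch of SetMaterial, assuming `_stage`, `_visualMsg`, and `_stageDirUrl` already exist; the prim path is illustrative.
// Sketch only: apply a visual message's material to a geometry prim.
void ApplyVisualMaterial(
    const pxr::UsdStageRefPtr &_stage,
    const ignition::msgs::Visual &_visualMsg,
    const std::string &_stageDirUrl)
{
  auto gprim = pxr::UsdGeomGprim::Get(
      _stage, pxr::SdfPath("/World/box/box_visual/geometry"));
  if (!ignition::omniverse::SetMaterial(gprim, _visualMsg, _stage,
                                        _stageDirUrl))
  {
    ignwarn << "Failed to set material" << std::endl;
  }
}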
gazebosim/gz-omni/source/ignition_live/Scene.cpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "Scene.hpp"
#include "FUSDLayerNoticeListener.hpp"
#include "FUSDNoticeListener.hpp"
#include "Material.hpp"
#include "Mesh.hpp"
#include <ignition/common/Console.hh>
#include <ignition/common/Filesystem.hh>
#include <ignition/math/Quaternion.hh>
#include <pxr/usd/usd/primRange.h>
#include <pxr/usd/usdGeom/camera.h>
#include <pxr/usd/usdGeom/cube.h>
#include <pxr/usd/usdGeom/cylinder.h>
#include <pxr/usd/usdGeom/sphere.h>
#include <pxr/usd/usdGeom/xform.h>
#include <pxr/usd/usdGeom/xformCommonAPI.h>
#include <pxr/usd/usdLux/diskLight.h>
#include <pxr/usd/usdLux/distantLight.h>
#include <pxr/usd/usdLux/sphereLight.h>
#include <algorithm>
#include <chrono>
#include <string>
#include <thread>
#include <vector>
#include <pxr/base/tf/nullPtr.h>
using namespace std::chrono_literals;
namespace ignition
{
namespace omniverse
{
class Scene::Implementation
{
public:
std::string worldName;
std::shared_ptr<ThreadSafe<pxr::UsdStageRefPtr>> stage;
ignition::transport::Node node;
std::string stageDirUrl;
std::unordered_map<uint32_t, pxr::UsdPrim> entities;
std::unordered_map<std::string, uint32_t> entitiesByName;
std::shared_ptr<FUSDLayerNoticeListener> USDLayerNoticeListener;
std::shared_ptr<FUSDNoticeListener> USDNoticeListener;
Simulator simulatorPoses = {Simulator::Ignition};
bool UpdateSensors(const ignition::msgs::Sensor &_sensor,
const std::string &_usdSensorPath);
bool UpdateLights(const ignition::msgs::Light &_light,
const std::string &_usdLightPath);
bool UpdateScene(const ignition::msgs::Scene &_scene);
bool UpdateVisual(const ignition::msgs::Visual &_visual,
const std::string &_usdLinkPath);
bool UpdateLink(const ignition::msgs::Link &_link,
const std::string &_usdModelPath);
bool UpdateJoint(const ignition::msgs::Joint &_joint,
const std::string &_modelName);
bool UpdateModel(const ignition::msgs::Model &_model);
void SetPose(const pxr::UsdGeomXformCommonAPI &_prim,
const ignition::msgs::Pose &_pose);
void ResetPose(const pxr::UsdGeomXformCommonAPI &_prim);
void SetScale(const pxr::UsdGeomXformCommonAPI &_xform,
const ignition::msgs::Vector3d &_scale);
void ResetScale(const pxr::UsdGeomXformCommonAPI &_prim);
void CallbackPoses(const ignition::msgs::Pose_V &_msg);
void CallbackJoint(const ignition::msgs::Model &_msg);
void CallbackScene(const ignition::msgs::Scene &_scene);
void CallbackSceneDeletion(const ignition::msgs::UInt32_V &_msg);
};
//////////////////////////////////////////////////
Scene::Scene(
const std::string &_worldName,
const std::string &_stageUrl,
Simulator _simulatorPoses)
: dataPtr(ignition::utils::MakeUniqueImpl<Implementation>())
{
ignmsg << "Opened stage [" << _stageUrl << "]" << std::endl;
this->dataPtr->worldName = _worldName;
this->dataPtr->stage = std::make_shared<ThreadSafe<pxr::UsdStageRefPtr>>(
pxr::UsdStage::Open(_stageUrl));
this->dataPtr->stageDirUrl = ignition::common::parentPath(_stageUrl);
this->dataPtr->simulatorPoses = _simulatorPoses;
}
//////////////////////////////////////////////////
std::shared_ptr<ThreadSafe<pxr::UsdStageRefPtr>> &Scene::Stage()
{
return this->dataPtr->stage;
}
//////////////////////////////////////////////////
void Scene::Implementation::SetPose(const pxr::UsdGeomXformCommonAPI &_prim,
const ignition::msgs::Pose &_pose)
{
if (this->simulatorPoses == Simulator::Ignition)
{
if (_prim)
{
pxr::UsdGeomXformCommonAPI xformApi(_prim);
const auto &pos = _pose.position();
const auto &orient = _pose.orientation();
ignition::math::Quaterniond quat(orient.w(), orient.x(), orient.y(),
orient.z());
xformApi.SetTranslate(pxr::GfVec3d(pos.x(), pos.y(), pos.z()));
xformApi.SetRotate(pxr::GfVec3f(
ignition::math::Angle(quat.Roll()).Degree(),
ignition::math::Angle(quat.Pitch()).Degree(),
ignition::math::Angle(quat.Yaw()).Degree()),
pxr::UsdGeomXformCommonAPI::RotationOrderXYZ);
}
}
}
//////////////////////////////////////////////////
void Scene::Implementation::ResetPose(const pxr::UsdGeomXformCommonAPI &_prim)
{
pxr::UsdGeomXformCommonAPI xformApi(_prim);
xformApi.SetTranslate(pxr::GfVec3d(0));
xformApi.SetRotate(pxr::GfVec3f(0));
}
//////////////////////////////////////////////////
void Scene::Implementation::SetScale(const pxr::UsdGeomXformCommonAPI &_prim,
const ignition::msgs::Vector3d &_scale)
{
pxr::UsdGeomXformCommonAPI xformApi(_prim);
xformApi.SetScale(pxr::GfVec3f(_scale.x(), _scale.y(), _scale.z()));
}
//////////////////////////////////////////////////
void Scene::Implementation::ResetScale(const pxr::UsdGeomXformCommonAPI &_prim)
{
pxr::UsdGeomXformCommonAPI xformApi(_prim);
xformApi.SetScale(pxr::GfVec3f(1));
}
//////////////////////////////////////////////////
bool Scene::Implementation::UpdateVisual(const ignition::msgs::Visual &_visual,
const std::string &_usdLinkPath)
{
auto stage = this->stage->Lock();
std::string visualName = _visual.name();
std::string suffix = "_visual";
std::size_t found = visualName.find("_visual");
if (found != std::string::npos)
{
suffix = "";
}
std::string usdVisualPath = _usdLinkPath + "/" + _visual.name() + suffix;
auto prim = stage->GetPrimAtPath(pxr::SdfPath(usdVisualPath));
if (prim)
return true;
auto usdVisualXform =
pxr::UsdGeomXform::Define(*stage, pxr::SdfPath(usdVisualPath));
pxr::UsdGeomXformCommonAPI xformApi(usdVisualXform);
if (_visual.has_scale())
{
this->SetScale(xformApi, _visual.scale());
}
else
{
this->ResetScale(xformApi);
}
if (_visual.has_pose())
{
this->SetPose(xformApi, _visual.pose());
}
else
{
this->ResetPose(xformApi);
}
this->entities[_visual.id()] = usdVisualXform.GetPrim();
this->entitiesByName[usdVisualXform.GetPrim().GetName()] = _visual.id();
std::string usdGeomPath(usdVisualPath + "/geometry");
const auto &geom = _visual.geometry();
switch (geom.type())
{
case ignition::msgs::Geometry::BOX:
{
auto usdCube =
pxr::UsdGeomCube::Define(*stage, pxr::SdfPath(usdGeomPath));
usdCube.CreateSizeAttr().Set(1.0);
pxr::GfVec3f endPoint(0.5);
pxr::VtArray<pxr::GfVec3f> extentBounds;
extentBounds.push_back(-1.0 * endPoint);
extentBounds.push_back(endPoint);
usdCube.CreateExtentAttr().Set(extentBounds);
pxr::UsdGeomXformCommonAPI cubeXformAPI(usdCube);
cubeXformAPI.SetScale(pxr::GfVec3f(
geom.box().size().x(), geom.box().size().y(), geom.box().size().z()));
if (!SetMaterial(usdCube, _visual, *stage, this->stageDirUrl))
{
ignwarn << "Failed to set material" << std::endl;
}
break;
}
// TODO: Support cone
// case ignition::msgs::Geometry::CONE:
case ignition::msgs::Geometry::CYLINDER:
{
auto usdCylinder =
pxr::UsdGeomCylinder::Define(*stage, pxr::SdfPath(usdGeomPath));
double radius = geom.cylinder().radius();
double length = geom.cylinder().length();
usdCylinder.CreateRadiusAttr().Set(radius);
usdCylinder.CreateHeightAttr().Set(length);
pxr::GfVec3f endPoint(radius);
endPoint[2] = length * 0.5;
pxr::VtArray<pxr::GfVec3f> extentBounds;
extentBounds.push_back(-1.0 * endPoint);
extentBounds.push_back(endPoint);
usdCylinder.CreateExtentAttr().Set(extentBounds);
if (!SetMaterial(usdCylinder, _visual, *stage, this->stageDirUrl))
{
ignwarn << "Failed to set material" << std::endl;
}
break;
}
case ignition::msgs::Geometry::PLANE:
{
auto usdCube =
pxr::UsdGeomCube::Define(*stage, pxr::SdfPath(usdGeomPath));
usdCube.CreateSizeAttr().Set(1.0);
pxr::GfVec3f endPoint(0.5);
pxr::VtArray<pxr::GfVec3f> extentBounds;
extentBounds.push_back(-1.0 * endPoint);
extentBounds.push_back(endPoint);
usdCube.CreateExtentAttr().Set(extentBounds);
pxr::UsdGeomXformCommonAPI cubeXformAPI(usdCube);
cubeXformAPI.SetScale(
pxr::GfVec3f(geom.plane().size().x(), geom.plane().size().y(), 0.0025));
if (!SetMaterial(usdCube, _visual, *stage, this->stageDirUrl))
{
ignwarn << "Failed to set material" << std::endl;
}
break;
}
case ignition::msgs::Geometry::ELLIPSOID:
{
auto usdEllipsoid =
pxr::UsdGeomSphere::Define(*stage, pxr::SdfPath(usdGeomPath));
const auto maxRadii =
ignition::math::Vector3d(geom.ellipsoid().radii().x(),
geom.ellipsoid().radii().y(),
geom.ellipsoid().radii().z())
.Max();
usdEllipsoid.CreateRadiusAttr().Set(0.5);
pxr::UsdGeomXformCommonAPI xform(usdEllipsoid);
xform.SetScale(pxr::GfVec3f{
static_cast<float>(geom.ellipsoid().radii().x() / maxRadii),
static_cast<float>(geom.ellipsoid().radii().y() / maxRadii),
static_cast<float>(geom.ellipsoid().radii().z() / maxRadii),
});
// extents is the bounds before any transformation
pxr::VtArray<pxr::GfVec3f> extentBounds;
extentBounds.push_back(pxr::GfVec3f{static_cast<float>(-maxRadii)});
extentBounds.push_back(pxr::GfVec3f{static_cast<float>(maxRadii)});
usdEllipsoid.CreateExtentAttr().Set(extentBounds);
if (!SetMaterial(usdEllipsoid, _visual, *stage, this->stageDirUrl))
{
ignwarn << "Failed to set material" << std::endl;
}
break;
}
case ignition::msgs::Geometry::SPHERE:
{
auto usdSphere =
pxr::UsdGeomSphere::Define(*stage, pxr::SdfPath(usdGeomPath));
double radius = geom.sphere().radius();
usdSphere.CreateRadiusAttr().Set(radius);
pxr::VtArray<pxr::GfVec3f> extentBounds;
extentBounds.push_back(pxr::GfVec3f(-1.0 * radius));
extentBounds.push_back(pxr::GfVec3f(radius));
usdSphere.CreateExtentAttr().Set(extentBounds);
if (!SetMaterial(usdSphere, _visual, *stage, this->stageDirUrl))
{
ignwarn << "Failed to set material" << std::endl;
}
break;
}
case ignition::msgs::Geometry::CAPSULE:
{
auto usdCapsule =
pxr::UsdGeomCapsule::Define(*stage, pxr::SdfPath(usdGeomPath));
double radius = geom.capsule().radius();
double length = geom.capsule().length();
usdCapsule.CreateRadiusAttr().Set(radius);
usdCapsule.CreateHeightAttr().Set(length);
pxr::GfVec3f endPoint(radius);
endPoint[2] += 0.5 * length;
pxr::VtArray<pxr::GfVec3f> extentBounds;
extentBounds.push_back(-1.0 * endPoint);
extentBounds.push_back(endPoint);
usdCapsule.CreateExtentAttr().Set(extentBounds);
if (!SetMaterial(usdCapsule, _visual, *stage, this->stageDirUrl))
{
ignwarn << "Failed to set material" << std::endl;
}
break;
}
case ignition::msgs::Geometry::MESH:
{
auto usdMesh = UpdateMesh(geom.mesh(), usdGeomPath, *stage);
if (!usdMesh)
{
ignerr << "Failed to update visual [" << _visual.name() << "]"
<< std::endl;
return false;
}
if (!SetMaterial(usdMesh, _visual, *stage, this->stageDirUrl))
{
ignerr << "Failed to update visual [" << _visual.name() << "]"
<< std::endl;
return false;
}
break;
}
default:
ignerr << "Failed to update geometry (unsuported geometry type '"
<< _visual.type() << "')" << std::endl;
return false;
}
// TODO(ahcorde): When usdphysics will be available in nv-usd we should
// replace this code with pxr::UsdPhysicsCollisionAPI::Apply(geomPrim)
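// Once UsdPhysics is available, this workaround should reduce to a single
// call (hypothetical sketch):
//   auto geomPrim = stage->GetPrimAtPath(pxr::SdfPath(usdGeomPath));
//   pxr::UsdPhysicsCollisionAPI::Apply(geomPrim);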
pxr::TfToken appliedSchemaNamePhysicsCollisionAPI("PhysicsCollisionAPI");
pxr::SdfPrimSpecHandle primSpec = pxr::SdfCreatePrimInLayer(
stage->GetEditTarget().GetLayer(),
pxr::SdfPath(usdGeomPath));
pxr::SdfTokenListOp listOp;
// Use ReplaceOperations to append in place.
if (!listOp.ReplaceOperations(pxr::SdfListOpTypeExplicit,
0, 0, {appliedSchemaNamePhysicsCollisionAPI})) {
ignerr << "Error applying schema PhysicsCollisionAPI" << '\n';
}
primSpec->SetInfo(
pxr::UsdTokens->apiSchemas, pxr::VtValue::Take(listOp));
return true;
}
//////////////////////////////////////////////////
bool Scene::Implementation::UpdateLink(const ignition::msgs::Link &_link,
const std::string &_usdModelPath)
{
auto stage = this->stage->Lock();
std::string linkName = _link.name();
std::string suffix = "_link";
std::size_t found = linkName.find("_link");
if (found != std::string::npos)
{
suffix = "";
}
std::string usdLinkPath = _usdModelPath + "/" + _link.name();
auto prim = stage->GetPrimAtPath(pxr::SdfPath(usdLinkPath));
if (prim)
{
return true;
}
else
{
usdLinkPath = _usdModelPath + "/" + _link.name() + suffix;
prim = stage->GetPrimAtPath(pxr::SdfPath(usdLinkPath));
if (prim)
{
return true;
}
}
auto xform = pxr::UsdGeomXform::Define(*stage, pxr::SdfPath(usdLinkPath));
pxr::UsdGeomXformCommonAPI xformApi(xform);
if (_link.has_pose())
{
this->SetPose(xformApi, _link.pose());
}
else
{
this->ResetPose(xformApi);
}
this->entities[_link.id()] = xform.GetPrim();
this->entitiesByName[xform.GetPrim().GetName()] = _link.id();
for (const auto &visual : _link.visual())
{
if (!this->UpdateVisual(visual, usdLinkPath))
{
ignerr << "Failed to update link [" << _link.name() << "]" << std::endl;
return false;
}
}
for (const auto &sensor : _link.sensor())
{
std::string usdSensorPath = usdLinkPath + "/" + sensor.name();
if (!this->UpdateSensors(sensor, usdSensorPath))
{
ignerr << "Failed to add sensor [" << usdSensorPath << "]" << std::endl;
return false;
}
}
for (const auto &light : _link.light())
{
if (!this->UpdateLights(light, usdLinkPath + "/" + light.name()))
{
ignerr << "Failed to add light [" << usdLinkPath + "/" + light.name()
<< "]" << std::endl;
return false;
}
}
return true;
}
//////////////////////////////////////////////////
bool Scene::Implementation::UpdateJoint(
const ignition::msgs::Joint &_joint, const std::string &_modelName)
{
auto stage = this->stage->Lock();
auto jointUSD =
stage->GetPrimAtPath(pxr::SdfPath("/" + worldName + "/" + _joint.name()));
// TODO(ahcorde): This code is duplicated in the sdformat converter.
if (!jointUSD)
{
jointUSD =
stage->GetPrimAtPath(
pxr::SdfPath(
"/" + worldName + "/" + _modelName + "/" + _joint.name()));
if (!jointUSD)
{
switch (_joint.type())
{
case ignition::msgs::Joint::FIXED:
{
pxr::TfToken usdPrimTypeName("PhysicsFixedJoint");
auto jointFixedUSD = stage->DefinePrim(
pxr::SdfPath("/" + this->worldName + "/" + _joint.name()),
usdPrimTypeName);
auto body0 = jointFixedUSD.CreateRelationship(
pxr::TfToken("physics:body0"), false);
body0.AddTarget(pxr::SdfPath(
"/" + this->worldName + "/" + _joint.parent()));
auto body1 = jointFixedUSD.CreateRelationship(
pxr::TfToken("physics:body1"), false);
body1.AddTarget(pxr::SdfPath(
"/" + this->worldName + "/" + _joint.child()));
jointFixedUSD.CreateAttribute(pxr::TfToken("physics:localPos1"),
pxr::SdfValueTypeNames->Point3fArray, false).Set(
pxr::GfVec3f(0, 0, 0));
jointFixedUSD.CreateAttribute(pxr::TfToken("physics:localPos0"),
pxr::SdfValueTypeNames->Point3fArray, false).Set(
pxr::GfVec3f(_joint.pose().position().x(),
_joint.pose().position().y(),
_joint.pose().position().z()));
return true;
}
case ignition::msgs::Joint::REVOLUTE:
{
igndbg << "Creating a revolute joint" << '\n';
pxr::TfToken usdPrimTypeName("PhysicsRevoluteJoint");
auto revoluteJointUSD = stage->DefinePrim(
pxr::SdfPath("/" + this->worldName + "/" + _joint.name()),
usdPrimTypeName);
igndbg << "\tParent "
<< "/" + this->worldName + "/" + _joint.parent() << '\n';
igndbg << "\tchild "
<< "/" + this->worldName + "/" + _joint.child() << '\n';
pxr::TfTokenVector identifiersBody0 =
{pxr::TfToken("physics"), pxr::TfToken("body0")};
if (pxr::UsdRelationship body0 = revoluteJointUSD.CreateRelationship(
pxr::TfToken(pxr::SdfPath::JoinIdentifier(identifiersBody0)), false))
{
body0.AddTarget(
pxr::SdfPath("/" + this->worldName + "/panda/" + _joint.parent()),
pxr::UsdListPositionFrontOfAppendList);
}
else
{
igndbg << "Not able to create UsdRelationship for body1" << '\n';
}
pxr::TfTokenVector identifiersBody1 =
{pxr::TfToken("physics"), pxr::TfToken("body1")};
if (pxr::UsdRelationship body1 = revoluteJointUSD.CreateRelationship(
pxr::TfToken(pxr::SdfPath::JoinIdentifier(identifiersBody1)), false))
{
body1.AddTarget(
pxr::SdfPath("/" + this->worldName + "/panda/" + _joint.child()),
pxr::UsdListPositionFrontOfAppendList);
}
else
{
igndbg << "Not able to create UsdRelationship for body1" << '\n';
}
ignition::math::Vector3i axis(
_joint.axis1().xyz().x(),
_joint.axis1().xyz().y(),
_joint.axis1().xyz().z());
if (axis == ignition::math::Vector3i(1, 0, 0))
{
revoluteJointUSD.CreateAttribute(pxr::TfToken("physics:axis"),
pxr::SdfValueTypeNames->Token, false).Set(pxr::TfToken("X"));
}
else if (axis == ignition::math::Vector3i(0, 1, 0))
{
revoluteJointUSD.CreateAttribute(pxr::TfToken("physics:axis"),
pxr::SdfValueTypeNames->Token, false).Set(pxr::TfToken("Y"));
}
else if (axis == ignition::math::Vector3i(0, 0, 1))
{
revoluteJointUSD.CreateAttribute(pxr::TfToken("physics:axis"),
pxr::SdfValueTypeNames->Token, false).Set(pxr::TfToken("Z"));
}
revoluteJointUSD.CreateAttribute(pxr::TfToken("physics:localPos1"),
pxr::SdfValueTypeNames->Point3f, false).Set(
pxr::GfVec3f(0, 0, 0));
revoluteJointUSD.CreateAttribute(pxr::TfToken("physics:localPos0"),
pxr::SdfValueTypeNames->Point3f, false).Set(
pxr::GfVec3f(
_joint.pose().position().x(),
_joint.pose().position().y(),
_joint.pose().position().z()));
revoluteJointUSD.CreateAttribute(
pxr::TfToken("drive:angular:physics:damping"),
pxr::SdfValueTypeNames->Float, false).Set(100000.0f);
revoluteJointUSD.CreateAttribute(
pxr::TfToken("drive:angular:physics:stiffness"),
pxr::SdfValueTypeNames->Float, false).Set(1000000.0f);
revoluteJointUSD.CreateAttribute(
pxr::TfToken("drive:angular:physics:targetPosition"),
pxr::SdfValueTypeNames->Float, false).Set(0.0f);
revoluteJointUSD.CreateAttribute(
pxr::TfToken("physics:lowerLimit"),
pxr::SdfValueTypeNames->Float, false).Set(
static_cast<float>(_joint.axis1().limit_lower() * 180.0 / IGN_PI));
revoluteJointUSD.CreateAttribute(
pxr::TfToken("physics:upperLimit"),
pxr::SdfValueTypeNames->Float, false).Set(
static_cast<float>(_joint.axis1().limit_upper() * 180.0 / IGN_PI));
pxr::TfToken appliedSchemaNamePhysicsArticulationRootAPI(
"PhysicsArticulationRootAPI");
pxr::TfToken appliedSchemaNamePhysxArticulationAPI(
"PhysxArticulationAPI");
pxr::SdfPrimSpecHandle primSpecPanda = pxr::SdfCreatePrimInLayer(
stage->GetEditTarget().GetLayer(),
pxr::SdfPath("/" + this->worldName + "/panda"));
pxr::SdfTokenListOp listOpPanda;
// Use ReplaceOperations to append in place.
if (!listOpPanda.ReplaceOperations(
pxr::SdfListOpTypeExplicit,
0,
0,
{appliedSchemaNamePhysicsArticulationRootAPI,
appliedSchemaNamePhysxArticulationAPI})) {
ignerr << "Not able to setup the schema PhysxArticulationAPI "
<< "and PhysicsArticulationRootAPI\n";
}
primSpecPanda->SetInfo(
pxr::UsdTokens->apiSchemas, pxr::VtValue::Take(listOpPanda));
pxr::TfToken appliedSchemaName("PhysicsDriveAPI:angular");
pxr::SdfPrimSpecHandle primSpec = pxr::SdfCreatePrimInLayer(
stage->GetEditTarget().GetLayer(),
pxr::SdfPath("/" + this->worldName + "/" + _joint.name()));
pxr::SdfTokenListOp listOp;
// Use ReplaceOperations to append in place.
if (!listOp.ReplaceOperations(pxr::SdfListOpTypeExplicit,
0, 0, {appliedSchemaName})) {
ignerr << "Not able to setup the schema PhysicsDriveAPI\n";
}
primSpec->SetInfo(
pxr::UsdTokens->apiSchemas, pxr::VtValue::Take(listOp));
break;
}
default:
return false;
}
}
}
auto attrTargetPos = jointUSD.GetAttribute(
pxr::TfToken("drive:angular:physics:targetPosition"));
if (attrTargetPos)
{
attrTargetPos.Set(pxr::VtValue(
static_cast<float>(
ignition::math::Angle(_joint.axis1().position()).Degree())));
}
else
{
jointUSD.CreateAttribute(
pxr::TfToken("drive:angular:physics:targetPosition"),
pxr::SdfValueTypeNames->Float, false).Set(
static_cast<float>(
ignition::math::Angle(_joint.axis1().position()).Degree()));
}
return true;
}
//////////////////////////////////////////////////
bool Scene::Implementation::UpdateModel(const ignition::msgs::Model &_model)
{
auto stage = this->stage->Lock();
std::string modelName = _model.name();
if (modelName.empty())
return true;
auto range = pxr::UsdPrimRange::Stage(*stage);
for (auto const &prim : range)
{
if (prim.GetName().GetString() == modelName)
{
ignwarn << "The model [" << _model.name() << "] is already available"
<< " in Isaac Sim" << std::endl;
std::string usdModelPath = "/" + worldName + "/" + modelName;
auto modelPrim = stage->GetPrimAtPath(
pxr::SdfPath(usdModelPath));
if (modelPrim)
{
this->entities[_model.id()] = modelPrim;
this->entitiesByName[modelPrim.GetName()] = _model.id();
for (const auto &link : _model.link())
{
std::string linkName = link.name();
std::string suffix = "_link";
std::size_t found = linkName.find("_link");
if (found != std::string::npos)
{
suffix = "";
}
std::string usdLinkPath = usdModelPath + "/" + linkName + suffix;
auto linkPrim = stage->GetPrimAtPath(
pxr::SdfPath(usdLinkPath));
if (!linkPrim)
{
usdLinkPath = usdModelPath + "/" + linkName;
linkPrim = stage->GetPrimAtPath(
pxr::SdfPath(usdLinkPath));
}
if (linkPrim)
{
this->entities[link.id()] = linkPrim;
this->entitiesByName[linkPrim.GetName()] = link.id();
for (const auto &visual : link.visual())
{
std::string visualName = visual.name();
std::string suffix = "_visual";
std::size_t found = visualName.find("_visual");
if (found != std::string::npos)
{
suffix = "";
}
std::string usdVisualPath =
usdLinkPath + "/" + visualName + suffix;
auto visualPrim = stage->GetPrimAtPath(
pxr::SdfPath(usdVisualPath));
if (visualPrim)
{
this->entities[visual.id()] = visualPrim;
this->entitiesByName[visualPrim.GetName()] = visual.id();
}
else
{
usdVisualPath =
usdLinkPath + "/" + visualName;
visualPrim = stage->GetPrimAtPath(
pxr::SdfPath(usdVisualPath));
if (visualPrim)
{
this->entities[visual.id()] = visualPrim;
this->entitiesByName[visualPrim.GetName()] = visual.id();
}
}
}
for (const auto &light : link.light())
{
std::string usdLightPath =
usdLinkPath + "/" + light.name();
auto lightPrim = stage->GetPrimAtPath(
pxr::SdfPath(usdLightPath));
if (lightPrim)
{
this->entities[light.id()] = lightPrim;
this->entitiesByName[lightPrim.GetName()] = light.id();
}
}
}
}
}
}
}
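// USD prim names cannot contain spaces, so sanitize the model name before
// building the prim path.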
std::replace(modelName.begin(), modelName.end(), ' ', '_');
std::string usdModelPath = "/" + worldName + "/" + modelName;
this->entitiesByName[modelName] = _model.id();
auto xform = pxr::UsdGeomXform::Define(*stage, pxr::SdfPath(usdModelPath));
pxr::UsdGeomXformCommonAPI xformApi(xform);
if (_model.has_scale())
{
this->SetScale(xformApi, _model.scale());
}
else
{
this->ResetScale(xformApi);
}
if (_model.has_pose())
{
this->SetPose(xformApi, _model.pose());
}
else
{
this->ResetPose(xformApi);
}
this->entities[_model.id()] = xform.GetPrim();
for (const auto &link : _model.link())
{
if (!this->UpdateLink(link, usdModelPath))
{
ignerr << "Failed to update model [" << modelName << "]" << std::endl;
return false;
}
}
for (const auto &joint : _model.joint())
{
if (!this->UpdateJoint(joint, _model.name()))
{
ignerr << "Failed to update model [" << modelName << "]" << std::endl;
return false;
}
}
return true;
}
//////////////////////////////////////////////////
bool Scene::Implementation::UpdateScene(const ignition::msgs::Scene &_scene)
{
for (const auto &model : _scene.model())
{
if (!this->UpdateModel(model))
{
ignerr << "Failed to add model [" << model.name() << "]" << std::endl;
return false;
}
igndbg << "added model [" << model.name() << "]" << std::endl;
}
for (const auto &light : _scene.light())
{
if (!this->UpdateLights(light, "/" + worldName + "/" + light.name()))
{
ignerr << "Failed to add light [" << light.name() << "]" << std::endl;
return false;
}
}
return true;
}
//////////////////////////////////////////////////
bool Scene::Implementation::UpdateSensors(const ignition::msgs::Sensor &_sensor,
const std::string &_usdSensorPath)
{
auto stage = this->stage->Lock();
// TODO(ahcorde): This code is duplicated in the USD converter (sdformat)
if (_sensor.type() == "camera")
{
auto usdCamera = pxr::UsdGeomCamera::Define(
*stage, pxr::SdfPath(_usdSensorPath));
// TODO(ahcorde): The default focal length in USD is 50, but a value
// closer to Ignition Gazebo's default would be around 40.
usdCamera.CreateFocalLengthAttr().Set(52.0f);
usdCamera.CreateClippingRangeAttr().Set(pxr::GfVec2f(
static_cast<float>(_sensor.camera().near_clip()),
static_cast<float>(_sensor.camera().far_clip())));
usdCamera.CreateHorizontalApertureAttr().Set(
static_cast<float>(
_sensor.camera().horizontal_fov() * 180.0f / IGN_PI));
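// USD cameras look down -Z with +Y up, while Ignition cameras look down
// +X with +Z up; this fixed rotation converts between the two conventions.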
ignition::math::Pose3d poseCameraYUp(0, 0, 0, IGN_PI_2, 0, -IGN_PI_2);
ignition::math::Quaterniond q(
_sensor.pose().orientation().w(),
_sensor.pose().orientation().x(),
_sensor.pose().orientation().y(),
_sensor.pose().orientation().z());
ignition::math::Pose3d poseCamera(
_sensor.pose().position().x(),
_sensor.pose().position().y(),
_sensor.pose().position().z(),
q.Roll() * 180.0 / IGN_PI,
q.Pitch() * 180.0 / IGN_PI,
q.Yaw() * 180.0 / IGN_PI);
poseCamera = poseCamera * poseCameraYUp;
usdCamera.AddTranslateOp(pxr::UsdGeomXformOp::Precision::PrecisionDouble)
.Set(
pxr::GfVec3d(
poseCamera.Pos().X(),
poseCamera.Pos().Y(),
poseCamera.Pos().Z()));
usdCamera.AddRotateXYZOp(pxr::UsdGeomXformOp::Precision::PrecisionDouble)
.Set(
pxr::GfVec3d(
poseCamera.Rot().Roll() * 180.0 / IGN_PI,
poseCamera.Rot().Pitch() * 180.0 / IGN_PI,
poseCamera.Rot().Yaw() * 180.0 / IGN_PI));
}
else if (_sensor.type() == "gpu_lidar")
{
pxr::UsdGeomXform::Define(
*stage, pxr::SdfPath(_usdSensorPath));
auto lidarPrim = stage->GetPrimAtPath(
pxr::SdfPath(_usdSensorPath));
lidarPrim.SetTypeName(pxr::TfToken("Lidar"));
lidarPrim.CreateAttribute(pxr::TfToken("minRange"),
pxr::SdfValueTypeNames->Float, false).Set(
static_cast<float>(_sensor.lidar().range_min()));
lidarPrim.CreateAttribute(pxr::TfToken("maxRange"),
pxr::SdfValueTypeNames->Float, false).Set(
static_cast<float>(_sensor.lidar().range_max()));
const auto horizontalFov = _sensor.lidar().horizontal_max_angle() -
_sensor.lidar().horizontal_min_angle();
// TODO(adlarkin) double check if these FOV calculations are correct
lidarPrim.CreateAttribute(pxr::TfToken("horizontalFov"),
pxr::SdfValueTypeNames->Float, false).Set(
static_cast<float>(horizontalFov * 180.0f / IGN_PI));
const auto verticalFov = _sensor.lidar().vertical_max_angle() -
_sensor.lidar().vertical_min_angle();
lidarPrim.CreateAttribute(pxr::TfToken("verticalFov"),
pxr::SdfValueTypeNames->Float, false).Set(
static_cast<float>(verticalFov * 180.0f / IGN_PI));
lidarPrim.CreateAttribute(pxr::TfToken("horizontalResolution"),
pxr::SdfValueTypeNames->Float, false).Set(
static_cast<float>(_sensor.lidar().horizontal_resolution()));
lidarPrim.CreateAttribute(pxr::TfToken("verticalResolution"),
pxr::SdfValueTypeNames->Float, false).Set(
static_cast<float>(_sensor.lidar().vertical_resolution()));
}
else
{
ignwarn << "This kind of sensor [" << _sensor.type()
<< "] is not supported" << std::endl;
return true;
}
return true;
}
//////////////////////////////////////////////////
bool Scene::Implementation::UpdateLights(const ignition::msgs::Light &_light,
const std::string &_usdLightPath)
{
// TODO: We can probably re-use code from sdformat
auto stage = this->stage->Lock();
const pxr::SdfPath sdfLightPath(_usdLightPath);
switch (_light.type())
{
case ignition::msgs::Light::POINT:
{
auto pointLight = pxr::UsdLuxSphereLight::Define(*stage, sdfLightPath);
pointLight.CreateTreatAsPointAttr().Set(true);
this->entities[_light.id()] = pointLight.GetPrim();
this->entitiesByName[pointLight.GetPrim().GetName()] = _light.id();
pointLight.CreateRadiusAttr(pxr::VtValue(0.1f));
pointLight.CreateColorAttr(pxr::VtValue(pxr::GfVec3f(
_light.diffuse().r(), _light.diffuse().g(), _light.diffuse().b())));
break;
}
case ignition::msgs::Light::SPOT:
{
auto diskLight = pxr::UsdLuxDiskLight::Define(*stage, sdfLightPath);
this->entities[_light.id()] = diskLight.GetPrim();
this->entitiesByName[diskLight.GetPrim().GetName()] = _light.id();
diskLight.CreateColorAttr(pxr::VtValue(pxr::GfVec3f(
_light.diffuse().r(), _light.diffuse().g(), _light.diffuse().b())));
break;
}
case ignition::msgs::Light::DIRECTIONAL:
{
auto directionalLight =
pxr::UsdLuxDistantLight::Define(*stage, sdfLightPath);
this->entities[_light.id()] = directionalLight.GetPrim();
this->entitiesByName[directionalLight.GetPrim().GetName()] = _light.id();
directionalLight.CreateColorAttr(pxr::VtValue(pxr::GfVec3f(
_light.diffuse().r(), _light.diffuse().g(), _light.diffuse().b())));
break;
}
default:
return false;
}
// This is a workaround to set the light's intensity attribute. Using the
// UsdLuxLightAPI would author the light's "inputs:intensity" attribute,
// but Isaac Sim reads the raw "intensity" attribute, so that attribute is
// authored directly here for compatibility.
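// A sketch of the schema-attribute route, for renderers that honor it
// (hypothetical; the exact entry point depends on the USD version), using
// the lightPrim and usdLightIntensity defined below:
//   pxr::UsdLuxLightAPI(lightPrim).CreateIntensityAttr().Set(usdLightIntensity);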
const float usdLightIntensity =
static_cast<float>(_light.intensity()) * 1000.0f;
auto lightPrim = stage->GetPrimAtPath(sdfLightPath);
lightPrim
.CreateAttribute(pxr::TfToken("intensity"), pxr::SdfValueTypeNames->Float,
false)
.Set(usdLightIntensity);
return true;
}
//////////////////////////////////////////////////
bool Scene::Init()
{
bool result;
ignition::msgs::Empty req;
ignition::msgs::Scene ignScene;
if (!this->dataPtr->node.Request(
"/world/" + this->dataPtr->worldName + "/scene/info", req, 5000,
ignScene, result))
{
ignwarn << "Error requesting scene info, make sure the world ["
<< this->dataPtr->worldName
<< "] is available, ignition-omniverse will keep trying..."
<< std::endl;
if (!this->dataPtr->node.Request(
"/world/" + this->dataPtr->worldName + "/scene/info", req, -1,
ignScene, result))
{
ignerr << "Error request scene info" << std::endl;
return false;
}
}
if (!this->dataPtr->UpdateScene(ignScene))
{
ignerr << "Failed to init scene" << std::endl;
return false;
}
std::vector<std::string> topics;
this->dataPtr->node.TopicList(topics);
for (auto const &topic : topics)
{
if (topic.find("/joint_state") != std::string::npos)
{
if (!this->dataPtr->node.Subscribe(
topic, &Scene::Implementation::CallbackJoint, this->dataPtr.get()))
{
ignerr << "Error subscribing to topic [" << topic << "]" << std::endl;
return false;
}
else
{
ignmsg << "Subscribed to topic: [joint_state]" << std::endl;
}
}
}
std::string topic = "/world/" + this->dataPtr->worldName + "/pose/info";
// Subscribe to a topic by registering a callback.
if (!this->dataPtr->node.Subscribe(
topic, &Scene::Implementation::CallbackPoses, this->dataPtr.get()))
{
ignerr << "Error subscribing to topic [" << topic << "]" << std::endl;
return false;
}
else
{
ignmsg << "Subscribed to topic: [" << topic << "]" << std::endl;
}
topic = "/world/" + this->dataPtr->worldName + "/scene/info";
if (!this->dataPtr->node.Subscribe(
topic, &Scene::Implementation::CallbackScene, this->dataPtr.get()))
{
ignerr << "Error subscribing to topic [" << topic << "]" << std::endl;
return false;
}
else
{
ignmsg << "Subscribed to topic: [" << topic << "]" << std::endl;
}
topic = "/world/" + this->dataPtr->worldName + "/scene/deletion";
if (!this->dataPtr->node.Subscribe(
topic, &Scene::Implementation::CallbackSceneDeletion,
this->dataPtr.get()))
{
ignerr << "Error subscribing to topic [" << topic << "]" << std::endl;
return false;
}
else
{
ignmsg << "Subscribed to topic: [" << topic << "]" << std::endl;
}
this->dataPtr->USDLayerNoticeListener =
std::make_shared<FUSDLayerNoticeListener>(
this->dataPtr->stage,
this->dataPtr->worldName);
auto LayerReloadKey = pxr::TfNotice::Register(
pxr::TfCreateWeakPtr(this->dataPtr->USDLayerNoticeListener.get()),
&FUSDLayerNoticeListener::HandleGlobalLayerReload);
auto LayerChangeKey = pxr::TfNotice::Register(
pxr::TfCreateWeakPtr(this->dataPtr->USDLayerNoticeListener.get()),
&FUSDLayerNoticeListener::HandleRootOrSubLayerChange,
this->dataPtr->stage->Lock()->GetRootLayer());
this->dataPtr->USDNoticeListener = std::make_shared<FUSDNoticeListener>(
this->dataPtr->stage,
this->dataPtr->worldName,
this->dataPtr->simulatorPoses,
this->dataPtr->entitiesByName);
auto USDNoticeKey = pxr::TfNotice::Register(
pxr::TfCreateWeakPtr(this->dataPtr->USDNoticeListener.get()),
&FUSDNoticeListener::Handle);
return true;
}
//////////////////////////////////////////////////
void Scene::Save() { this->Stage()->Lock()->Save(); }
//////////////////////////////////////////////////
/// \brief Function called each time a topic update is received.
void Scene::Implementation::CallbackPoses(const ignition::msgs::Pose_V &_msg)
{
for (const auto &poseMsg : _msg.pose())
{
try
{
auto stage = this->stage->Lock();
const auto &prim = this->entities.at(poseMsg.id());
if (prim)
{
this->SetPose(pxr::UsdGeomXformCommonAPI(prim), poseMsg);
}
}
catch (const std::out_of_range &)
{
ignwarn << "Error updating pose, cannot find [" << poseMsg.name() << " - " << poseMsg.id() << "]"
<< std::endl;
}
}
}
//////////////////////////////////////////////////
/// \brief Function called each time a topic update is received.
void Scene::Implementation::CallbackJoint(const ignition::msgs::Model &_msg)
{
// this->UpdateModel(_msg);
for (const auto &joint : _msg.joint())
{
if (!this->UpdateJoint(joint, _msg.name()))
{
ignerr << "Failed to update model [" << _msg.name() << "]" << std::endl;
return;
}
}
}
//////////////////////////////////////////////////
void Scene::Implementation::CallbackScene(const ignition::msgs::Scene &_scene)
{
this->UpdateScene(_scene);
}
//////////////////////////////////////////////////
void Scene::Implementation::CallbackSceneDeletion(
const ignition::msgs::UInt32_V &_msg)
{
for (const auto id : _msg.data())
{
try
{
auto stage = this->stage->Lock();
const auto &prim = this->entities.at(id);
std::string primName = prim.GetName();
stage->RemovePrim(prim.GetPath());
ignmsg << "Removed [" << prim.GetPath() << "]" << std::endl;
this->entities.erase(id);
this->entitiesByName.erase(primName);
}
catch (const std::out_of_range &)
{
ignwarn << "Failed to delete [" << id << "] (Unable to find node)"
<< std::endl;
}
}
}
} // namespace omniverse
} // namespace ignition
|
gazebosim/gz-omni/source/ignition_live/Scene.hpp | /*
* Copyright (C) 2021 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License"); * you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef IGNITION_OMNIVERSE_SCENE_HPP
#define IGNITION_OMNIVERSE_SCENE_HPP
#include "Error.hpp"
#include "ThreadSafe.hpp"
#include <ignition/utils/ImplPtr.hh>
#include <ignition/math/Pose3.hh>
#include <ignition/msgs/joint.pb.h>
#include <ignition/msgs/link.pb.h>
#include <ignition/msgs/model.pb.h>
#include <ignition/msgs/pose.pb.h>
#include <ignition/msgs/pose_v.pb.h>
#include <ignition/msgs/scene.pb.h>
#include <ignition/msgs/vector3d.pb.h>
#include <ignition/msgs/visual.pb.h>
#include <ignition/transport.hh>
#include <pxr/usd/usd/stage.h>
#include <pxr/usd/usdGeom/sphere.h>
#include <pxr/usd/usdGeom/capsule.h>
#include <pxr/usd/usdGeom/cube.h>
#include <pxr/usd/usdGeom/cylinder.h>
#include <pxr/usd/usdGeom/mesh.h>
#include <pxr/usd/usdShade/material.h>
#include <pxr/usd/usdGeom/xformCommonAPI.h>
#include <cstdint>
#include <memory>
#include <string>
#include <thread>
#include <unordered_map>
namespace ignition
{
namespace omniverse
{
enum class Simulator : int { Ignition, IsaacSim };
class Scene
{
public:
Scene(
const std::string &_worldName,
const std::string &_stageUrl,
Simulator _simulatorPoses);
/// \brief Initialize the scene and subscribes for updates. This blocks until
/// the scene is initialized.
/// \return true if success
bool Init();
/// \brief Equivalent to `scene.Stage().Lock()->Save()`.
void Save();
std::shared_ptr<ThreadSafe<pxr::UsdStageRefPtr>> &Stage();
/// \internal
/// \brief Private data pointer
IGN_UTILS_UNIQUE_IMPL_PTR(dataPtr)
};
} // namespace omniverse
} // namespace ignition
#endif
|
gazebosim/gz-omni/source/ignition_live/Mesh.hpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef IGNITION_OMNIVERSE_MESH_HPP
#define IGNITION_OMNIVERSE_MESH_HPP
#include <ignition/msgs/meshgeom.pb.h>
#include <pxr/usd/usd/stage.h>
#include <pxr/usd/usdGeom/mesh.h>
namespace ignition
{
namespace omniverse
{
pxr::UsdGeomMesh UpdateMesh(const ignition::msgs::MeshGeom& _meshMsg,
const std::string& _path,
const pxr::UsdStageRefPtr& _stage);
}
} // namespace ignition
#endif
|
gazebosim/gz-omni/source/ignition_live/Material.cpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "Material.hpp"
#include <ignition/common/Console.hh>
#include <ignition/math/Color.hh>
#include <pxr/usd/usd/tokens.h>
#include <pxr/usd/usdGeom/gprim.h>
#include <pxr/usd/usdShade/material.h>
#include <pxr/usd/usdShade/materialBindingAPI.h>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <OmniClient.h>
namespace ignition
{
namespace omniverse
{
/// \brief Return the full local path of a URL. If the resource is a
/// remote URI, try to find the file in the local Fuel cache.
/// \param[in] _fullPath URI of the resource
/// \return The resolved path, or an empty string on failure
std::string checkURI(const std::string &_fullPath)
{
// TODO (ahcorde): This code is duplicated is the USD converter (sdformat)
ignition::common::URI uri(_fullPath);
std::string fullPath = _fullPath;
std::string home;
if (!ignition::common::env("HOME", home, false))
{
ignwarn << "The HOME environment variable was not defined, "
<< "so the resource [" << fullPath << "] could not be found\n";
return "";
}
if (uri.Scheme() == "http" || uri.Scheme() == "https")
{
auto systemPaths = ignition::common::systemPaths();
std::vector<std::string> tokens = ignition::common::split(uri.Path().Str(), "/");
std::string server = tokens[0];
std::string versionServer = tokens[1];
std::string owner = ignition::common::lowercase(tokens[2]);
std::string type = ignition::common::lowercase(tokens[3]);
std::string modelName = ignition::common::lowercase(tokens[4]);
std::string modelVersion = ignition::common::lowercase(tokens[5]);
fullPath = ignition::common::joinPaths(
home, ".ignition", "fuel", server, owner, type, modelName, modelVersion);
systemPaths->AddFilePaths(fullPath);
for (std::size_t i = 7; i < tokens.size(); i++)
{
fullPath = ignition::common::joinPaths(
fullPath, ignition::common::lowercase(tokens[i]));
systemPaths->AddFilePaths(fullPath);
}
}
return fullPath;
}
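// Example (hypothetical Fuel URI, Linux cache layout):
//   checkURI("https://fuel.ignitionrobotics.org/1.0/OpenRobotics/models/Cube/2/files/meshes/cube.dae")
//   -> "$HOME/.ignition/fuel/fuel.ignitionrobotics.org/openrobotics/models/cube/2/meshes/cube.dae"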
/// \brief Copy a file into the stage directory on the Nucleus server
/// \param[in] _path relative path where the copy will be located
/// \param[in] _fullPath full path of the file to copy
/// \param[in] _stageDirUrl stage directory URL to copy materials into
/// \return true if the file was copied
bool copyMaterial(
const std::string &_path,
const std::string &_fullPath,
const std::string &_stageDirUrl)
{
if (_path.empty() || _fullPath.empty())
return false;
if (!omniClientWaitFor(omniClientCopy(
_fullPath.c_str(),
std::string(_stageDirUrl + "/" + _path).c_str(),
nullptr,
nullptr), 1000))
{
ignerr << "omniClientCopy timed out. Not able to copy file ["
<< _fullPath << "] to nucleus ["
<< _stageDirUrl + "/" + _path << "]." << std::endl;
return false;
}
return true;
}
/// \brief Create the path to copy the material
/// \param[in] _uri full path of the file to copy
/// \return A relative path to save the material, the path looks like:
/// materials/textures/<filename with extension>
std::string getMaterialCopyPath(const std::string &_uri)
{
return ignition::common::joinPaths(
".",
"materials",
"textures",
ignition::common::basename(_uri));
}
/// \brief Fill Material shader attributes and properties
/// \param[in] _prim USD primitive
/// \param[in] _name Name of the field attribute or property
/// \param[in] _vType Type of the field
/// \param[in] _value Value of the field
/// \param[in] _customData Custom data to set the field
/// \param[in] _displayName Display name
/// \param[in] _displayGroup Display group
/// \param[in] _doc Documentation of the field
/// \param[in] _colorSpace if the material is a texture, we can specify the
/// colorSpace of the image
template <typename T>
void CreateMaterialInput(
const pxr::UsdPrim &_prim, const std::string &_name,
const pxr::SdfValueTypeName &_vType, T _value,
const std::map<pxr::TfToken, pxr::VtValue> &_customData,
const pxr::TfToken &_displayName = pxr::TfToken(""),
const pxr::TfToken &_displayGroup = pxr::TfToken(""),
const std::string &_doc = "",
const pxr::TfToken &_colorSpace = pxr::TfToken(""))
{
auto shader = pxr::UsdShadeShader(_prim);
if (shader)
{
pxr::SdfValueTypeName vTypeName;
if (_vType.IsScalar())
{
vTypeName = _vType.GetScalarType();
}
else if (_vType.IsArray())
{
vTypeName = _vType.GetArrayType();
}
auto surfaceInput = shader.CreateInput(pxr::TfToken(_name), vTypeName);
surfaceInput.Set(_value);
auto attr = surfaceInput.GetAttr();
for (const auto &[key, customValue] : _customData)
{
attr.SetCustomDataByKey(key, customValue);
}
if (!_displayName.GetString().empty())
{
attr.SetDisplayName(_displayName);
}
if (!_displayGroup.GetString().empty())
{
attr.SetDisplayGroup(_displayGroup);
}
if (!_doc.empty())
{
attr.SetDocumentation(_doc);
}
if (!_colorSpace.GetString().empty())
{
attr.SetColorSpace(_colorSpace);
}
}
else
{
ignerr << "Not able to convert the prim to a UsdShadeShader" << std::endl;
}
}
/// \param[in] _stageDirUrl stage directory URL to copy materials if required
bool SetMaterial(const pxr::UsdGeomGprim &_gprim,
const ignition::msgs::Visual &_visualMsg,
const pxr::UsdStageRefPtr &_stage,
const std::string &_stageDirUrl)
{
if (!_visualMsg.has_material())
{
return true;
}
const std::string mtlPath = "/Looks/Material_" + _visualMsg.name() + "_" +
std::to_string(_visualMsg.id());
pxr::UsdShadeMaterial material =
pxr::UsdShadeMaterial::Define(_stage, pxr::SdfPath(mtlPath));
auto usdShader =
pxr::UsdShadeShader::Define(_stage, pxr::SdfPath(mtlPath + "/Shader"));
auto shaderPrim = usdShader.GetPrim();
auto shaderOut =
pxr::UsdShadeConnectableAPI(shaderPrim)
.CreateOutput(pxr::TfToken("out"), pxr::SdfValueTypeNames->Token);
material.CreateSurfaceOutput(pxr::TfToken("mdl")).ConnectToSource(shaderOut);
material.CreateVolumeOutput(pxr::TfToken("mdl")).ConnectToSource(shaderOut);
material.CreateDisplacementOutput(pxr::TfToken("mdl"))
.ConnectToSource(shaderOut);
usdShader.GetImplementationSourceAttr().Set(pxr::UsdShadeTokens->sourceAsset);
usdShader.SetSourceAsset(pxr::SdfAssetPath("OmniPBR.mdl"),
pxr::TfToken("mdl"));
usdShader.SetSourceAssetSubIdentifier(pxr::TfToken("OmniPBR"),
pxr::TfToken("mdl"));
std::map<pxr::TfToken, pxr::VtValue> customDataDiffuse = {
{pxr::TfToken("default"), pxr::VtValue(pxr::GfVec3f(0.2, 0.2, 0.2))},
{pxr::TfToken("range:max"),
pxr::VtValue(pxr::GfVec3f(100000, 100000, 100000))},
{pxr::TfToken("range:min"), pxr::VtValue(pxr::GfVec3f(0, 0, 0))}};
ignition::math::Color diffuse(
_visualMsg.material().diffuse().r(), _visualMsg.material().diffuse().g(),
_visualMsg.material().diffuse().b(), _visualMsg.material().diffuse().a());
CreateMaterialInput<pxr::GfVec3f>(
shaderPrim, "diffuse_color_constant", pxr::SdfValueTypeNames->Color3f,
pxr::GfVec3f(diffuse.R(), diffuse.G(), diffuse.B()), customDataDiffuse,
pxr::TfToken("Base Color"), pxr::TfToken("Albedo"),
"This is the base color");
std::map<pxr::TfToken, pxr::VtValue> customDataEmissive = {
{pxr::TfToken("default"), pxr::VtValue(pxr::GfVec3f(1, 0.1, 0.1))},
{pxr::TfToken("range:max"),
pxr::VtValue(pxr::GfVec3f(100000, 100000, 100000))},
{pxr::TfToken("range:min"), pxr::VtValue(pxr::GfVec3f(0, 0, 0))}};
ignition::math::Color emissive(_visualMsg.material().emissive().r(),
_visualMsg.material().emissive().g(),
_visualMsg.material().emissive().b(),
_visualMsg.material().emissive().a());
CreateMaterialInput<pxr::GfVec3f>(
shaderPrim, "emissive_color", pxr::SdfValueTypeNames->Color3f,
pxr::GfVec3f(emissive.R(), emissive.G(), emissive.B()),
customDataEmissive, pxr::TfToken("Emissive Color"),
pxr::TfToken("Emissive"), "The emission color");
std::map<pxr::TfToken, pxr::VtValue> customDataEnableEmission = {
{pxr::TfToken("default"), pxr::VtValue(false)}};
CreateMaterialInput<bool>(
shaderPrim, "enable_emission", pxr::SdfValueTypeNames->Bool,
emissive.A() > 0, customDataEnableEmission,
pxr::TfToken("Enable Emissive"), pxr::TfToken("Emissive"),
"Enables the emission of light from the material");
std::map<pxr::TfToken, pxr::VtValue> customDataIntensity = {
{pxr::TfToken("default"), pxr::VtValue(40)},
{pxr::TfToken("range:max"), pxr::VtValue(100000)},
{pxr::TfToken("range:min"), pxr::VtValue(0)}};
CreateMaterialInput<float>(
shaderPrim, "emissive_intensity", pxr::SdfValueTypeNames->Float,
emissive.A(), customDataIntensity, pxr::TfToken("Emissive Intensity"),
pxr::TfToken("Emissive"), "Intensity of the emission");
if (_visualMsg.material().has_pbr())
{
auto pbr = _visualMsg.material().pbr();
std::map<pxr::TfToken, pxr::VtValue> customDataMetallicConstant =
{
{pxr::TfToken("default"), pxr::VtValue(0.5)},
{pxr::TfToken("range:max"), pxr::VtValue(1)},
{pxr::TfToken("range:min"), pxr::VtValue(0)}
};
CreateMaterialInput<float>(
shaderPrim,
"metallic_constant",
pxr::SdfValueTypeNames->Float,
pbr.metalness(),
customDataMetallicConstant,
pxr::TfToken("Metallic Amount"),
pxr::TfToken("Reflectivity"),
"Metallic Material");
std::map<pxr::TfToken, pxr::VtValue> customDataRoughnessConstant =
{
{pxr::TfToken("default"), pxr::VtValue(0.5)},
{pxr::TfToken("range:max"), pxr::VtValue(1)},
{pxr::TfToken("range:min"), pxr::VtValue(0)}
};
CreateMaterialInput<float>(
shaderPrim,
"reflection_roughness_constant",
pxr::SdfValueTypeNames->Float,
pbr.roughness(),
customDataRoughnessConstant,
pxr::TfToken("Roughness Amount"),
pxr::TfToken("Reflectivity"),
"Higher roughness values lead to more blurry reflections");
if (!pbr.albedo_map().empty())
{
std::map<pxr::TfToken, pxr::VtValue> customDataDiffuseTexture =
{
{pxr::TfToken("default"), pxr::VtValue(pxr::SdfAssetPath())},
};
std::string copyPath = getMaterialCopyPath(pbr.albedo_map());
std::string albedoMapURI = checkURI(pbr.albedo_map());
std::string fullnameAlbedoMap =
ignition::common::findFile(
ignition::common::basename(albedoMapURI));
if (fullnameAlbedoMap.empty())
{
fullnameAlbedoMap = pbr.albedo_map();
}
copyMaterial(copyPath, fullnameAlbedoMap, _stageDirUrl);
CreateMaterialInput<pxr::SdfAssetPath>(
shaderPrim,
"diffuse_texture",
pxr::SdfValueTypeNames->Asset,
pxr::SdfAssetPath(copyPath),
customDataDiffuseTexture,
pxr::TfToken("Base Map"),
pxr::TfToken("Albedo"),
"",
pxr::TfToken("auto"));
}
if (!pbr.metalness_map().empty())
{
std::map<pxr::TfToken, pxr::VtValue> customDataMetallnessTexture =
{
{pxr::TfToken("default"), pxr::VtValue(pxr::SdfAssetPath())},
};
std::string copyPath = getMaterialCopyPath(pbr.metalness_map());
std::string fullnameMetallnessMap =
ignition::common::findFile(
ignition::common::basename(pbr.metalness_map()));
if (fullnameMetallnessMap.empty())
{
fullnameMetallnessMap = pbr.metalness_map();
}
copyMaterial(copyPath, fullnameMetallnessMap, _stageDirUrl);
CreateMaterialInput<pxr::SdfAssetPath>(
shaderPrim,
"metallic_texture",
pxr::SdfValueTypeNames->Asset,
pxr::SdfAssetPath(copyPath),
customDataMetallnessTexture,
pxr::TfToken("Metallic Map"),
pxr::TfToken("Reflectivity"),
"",
pxr::TfToken("raw"));
}
if (!pbr.normal_map().empty())
{
std::map<pxr::TfToken, pxr::VtValue> customDataNormalTexture =
{
{pxr::TfToken("default"), pxr::VtValue(pxr::SdfAssetPath())},
};
std::string copyPath = getMaterialCopyPath(pbr.normal_map());
std::string fullnameNormalMap =
ignition::common::findFile(
ignition::common::basename(pbr.normal_map()));
if (fullnameNormalMap.empty())
{
fullnameNormalMap = pbr.normal_map();
}
copyMaterial(copyPath, fullnameNormalMap, _stageDirUrl);
CreateMaterialInput<pxr::SdfAssetPath>(
shaderPrim,
"normalmap_texture",
pxr::SdfValueTypeNames->Asset,
pxr::SdfAssetPath(copyPath),
customDataNormalTexture,
pxr::TfToken("Normal Map"),
pxr::TfToken("Normal"),
"",
pxr::TfToken("raw"));
}
if (!pbr.roughness_map().empty())
{
std::map<pxr::TfToken, pxr::VtValue> customDataRoughnessTexture =
{
{pxr::TfToken("default"), pxr::VtValue(pxr::SdfAssetPath())},
};
std::string copyPath = getMaterialCopyPath(pbr.roughness_map());
std::string fullnameRoughnessMap =
ignition::common::findFile(
ignition::common::basename(pbr.roughness_map()));
if (fullnameRoughnessMap.empty())
{
fullnameRoughnessMap = pbr.roughness_map();
}
copyMaterial(copyPath, fullnameRoughnessMap, _stageDirUrl);
CreateMaterialInput<pxr::SdfAssetPath>(
shaderPrim,
"reflectionroughness_texture",
pxr::SdfValueTypeNames->Asset,
pxr::SdfAssetPath(copyPath),
customDataRoughnessTexture,
pxr::TfToken("RoughnessMap Map"),
pxr::TfToken("RoughnessMap"),
"",
pxr::TfToken("raw"));
std::map<pxr::TfToken, pxr::VtValue>
customDataRoughnessTextureInfluence =
{
{pxr::TfToken("default"), pxr::VtValue(0)},
{pxr::TfToken("range:max"), pxr::VtValue(1)},
{pxr::TfToken("range:min"), pxr::VtValue(0)}
};
CreateMaterialInput<bool>(
shaderPrim,
"reflection_roughness_texture_influence",
pxr::SdfValueTypeNames->Bool,
true,
customDataRoughnessTextureInfluence,
pxr::TfToken("Roughness Map Influence"),
pxr::TfToken("Reflectivity"),
"",
pxr::TfToken("raw"));
}
}
pxr::UsdShadeMaterialBindingAPI(_gprim).Bind(material);
return true;
}
} // namespace omniverse
} // namespace ignition
|
gazebosim/gz-omni/source/ignition_live/FUSDNoticeListener.cpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "FUSDNoticeListener.hpp"
#include "GetOp.hpp"
#include <ignition/common/Console.hh>
#include <pxr/usd/sdf/path.h>
#include <pxr/usd/usd/notice.h>
#include <pxr/usd/usd/primRange.h>
#include <pxr/usd/usdGeom/sphere.h>
#include <pxr/usd/usdGeom/cube.h>
#include <pxr/usd/usdGeom/cylinder.h>
#include <ignition/transport/Node.hh>
#include <ignition/msgs/model.pb.h>
#include <sdf/Collision.hh>
#include <sdf/Geometry.hh>
#include <sdf/Root.hh>
#include <sdf/Link.hh>
#include <sdf/Model.hh>
#include <sdf/Sphere.hh>
#include <sdf/Visual.hh>
namespace ignition
{
namespace omniverse
{
class FUSDNoticeListener::Implementation
{
public:
void ParseCube(const pxr::UsdPrim &_prim, sdf::Link &_link);
void ParseCylinder(const pxr::UsdPrim &_prim, sdf::Link &_link);
void ParseSphere(const pxr::UsdPrim &_prim, sdf::Link &_link);
bool ParsePrim(const pxr::UsdPrim &_prim, sdf::Link &_link)
{
if (_prim.IsA<pxr::UsdGeomSphere>())
{
ParseSphere(_prim, _link);
return true;
}
else if (_prim.IsA<pxr::UsdGeomCylinder>())
{
ParseCylinder(_prim, _link);
}
return false;
}
void CreateSDF(sdf::Link &_link, const pxr::UsdPrim &_prim)
{
if (!_prim)
return;
if (ParsePrim(_prim, _link))
{
return;
}
else
{
auto children = _prim.GetChildren();
for (const pxr::UsdPrim &childPrim : children)
{
if (ParsePrim(childPrim, _link))
{
return;
}
else
{
CreateSDF(_link, childPrim);
}
}
}
}
void jointStateCb(const ignition::msgs::Model &_msg);
std::shared_ptr<ThreadSafe<pxr::UsdStageRefPtr>> stage;
std::string worldName;
std::unordered_map<std::string, transport::Node::Publisher> revoluteJointPublisher;
/// \brief Ignition communication node.
public: transport::Node node;
Simulator simulatorPoses;
std::mutex jointStateMsgMutex;
std::unordered_map<std::string, double> jointStateMap;
std::unordered_map<std::string, uint32_t> * entitiesByName;
};
void FUSDNoticeListener::Implementation::ParseCube(
const pxr::UsdPrim &_prim, sdf::Link &_link)
{
// TODO: parsing a cube back into SDF is not implemented yet.
// double size;
// auto variant_cube = pxr::UsdGeomCube(_prim);
// variant_cube.GetSizeAttr().Get(&size);
}
void FUSDNoticeListener::Implementation::ParseCylinder(
const pxr::UsdPrim &_prim, sdf::Link &_link)
{
// TODO: parsing a cylinder back into SDF is not implemented yet.
// auto variant_cylinder = pxr::UsdGeomCylinder(_prim);
// double radius;
// double height;
// variant_cylinder.GetRadiusAttr().Get(&radius);
// variant_cylinder.GetHeightAttr().Get(&height);
}
void FUSDNoticeListener::Implementation::ParseSphere(
const pxr::UsdPrim &_prim, sdf::Link &_link)
{
double radius = 0.0;
auto variant_sphere = pxr::UsdGeomSphere(_prim);
variant_sphere.GetRadiusAttr().Get(&radius);
sdf::Visual visual;
sdf::Collision collision;
sdf::Geometry geom;
sdf::Sphere sphere;
geom.SetType(sdf::GeometryType::SPHERE);
sphere.SetRadius(radius);
geom.SetSphereShape(sphere);
visual.SetName("sphere_visual");
visual.SetGeom(geom);
collision.SetName("sphere_collision");
collision.SetGeom(geom);
_link.AddVisual(visual);
_link.AddCollision(collision);
}
FUSDNoticeListener::FUSDNoticeListener(
std::shared_ptr<ThreadSafe<pxr::UsdStageRefPtr>> &_stage,
const std::string &_worldName,
Simulator _simulatorPoses,
std::unordered_map<std::string, uint32_t> &_entitiesByName)
: dataPtr(ignition::utils::MakeUniqueImpl<Implementation>())
{
this->dataPtr->stage = _stage;
this->dataPtr->worldName = _worldName;
this->dataPtr->simulatorPoses = _simulatorPoses;
this->dataPtr->entitiesByName = &_entitiesByName;
std::string jointStateTopic = "/joint_states";
this->dataPtr->node.Subscribe(
jointStateTopic,
&FUSDNoticeListener::Implementation::jointStateCb,
this->dataPtr.get());
}
void FUSDNoticeListener::Implementation::jointStateCb(
const ignition::msgs::Model &_msg)
{
std::lock_guard<std::mutex> lock(this->jointStateMsgMutex);
for(int i = 0; i < _msg.joint_size(); ++i)
{
this->jointStateMap[_msg.joint(i).name()] =
_msg.joint(i).axis1().position();
}
}
void FUSDNoticeListener::Handle(
const class pxr::UsdNotice::ObjectsChanged &ObjectsChanged)
{
auto stage = this->dataPtr->stage->Lock();
for (const pxr::SdfPath &objectsChanged : ObjectsChanged.GetResyncedPaths())
{
ignmsg << "Resynced Path: " << objectsChanged.GetText() << std::endl;
auto modelUSD = stage->GetPrimAtPath(objectsChanged);
std::string primName = modelUSD.GetName();
if (primName.find("ROS_") != std::string::npos ||
primName.find("PhysicsScene") != std::string::npos)
{
continue;
}
if (modelUSD)
{
std::string strPath = objectsChanged.GetText();
if (strPath.find("_link") != std::string::npos
|| strPath.find("_visual") != std::string::npos
|| strPath.find("geometry") != std::string::npos) {
continue;
}
auto it = this->dataPtr->entitiesByName->find(modelUSD.GetName().GetString());
if (it != this->dataPtr->entitiesByName->end())
{
continue;
}
auto range = pxr::UsdPrimRange::Stage(*stage);
for (auto const &prim : range)
{
if (prim.GetName().GetString() == primName)
{
continue;
}
}
sdf::Root root;
sdf::Model model;
model.SetName(modelUSD.GetPath().GetName());
model.SetRawPose(ignition::math::Pose3d());
sdf::Link link;
link.SetName(modelUSD.GetPath().GetName());
this->dataPtr->CreateSDF(link, modelUSD);
model.AddLink(link);
root.SetModel(model);
// Prepare the input parameters.
ignition::msgs::EntityFactory req;
req.set_sdf(root.ToElement()->ToString(""));
req.set_name(modelUSD.GetPath().GetName());
req.set_allow_renaming(false);
igndbg << "root.ToElement()->ToString("") "
<< root.ToElement()->ToString("") << '\n';
ignition::msgs::Boolean rep;
bool result;
unsigned int timeout = 5000;
bool executed = this->dataPtr->node.Request(
"/world/" + this->dataPtr->worldName + "/create",
req, timeout, rep, result);
if (executed)
{
if (rep.data())
{
igndbg << "Model was inserted [" << modelUSD.GetPath().GetName()
<< "]" << '\n';
}
else
{
igndbg << "Error model was not inserted" << '\n';
}
}
}
}
ignition::msgs::Pose_V req;
if (this->dataPtr->simulatorPoses == Simulator::IsaacSim)
{
// this loop checks all paths to find revolute joints
// if there is some, we get the body0 and body1 and calculate the
// joint angle.
auto range = pxr::UsdPrimRange::Stage(*stage);
{
std::lock_guard<std::mutex> lock(this->dataPtr->jointStateMsgMutex);
for (auto const &prim : range)
{
std::string primType = prim.GetPrimTypeInfo().GetTypeName().GetText();
if (primType == std::string("PhysicsRevoluteJoint"))
{
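// TODO: the "panda" model name is hardcoded in this topic as well.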
std::string topic = transport::TopicUtils::AsValidTopic(
std::string("/model/") + std::string("panda") +
std::string("/joint/") + prim.GetPath().GetName() +
std::string("/0/cmd_pos"));
auto pub = this->dataPtr->revoluteJointPublisher.find(topic);
if (pub == this->dataPtr->revoluteJointPublisher.end())
{
this->dataPtr->revoluteJointPublisher[topic] =
this->dataPtr->node.Advertise<msgs::Double>(topic);
}
else
{
msgs::Double cmd;
float pos = this->dataPtr->jointStateMap[prim.GetName()];
cmd.set_data(pos);
pub->second.Publish(cmd);
}
}
}
}
for (const pxr::SdfPath &objectsChanged :
ObjectsChanged.GetChangedInfoOnlyPaths())
{
if (std::string(objectsChanged.GetText()) == "/")
continue;
igndbg << "path " << objectsChanged.GetText() << std::endl;
auto modelUSD = stage->GetPrimAtPath(objectsChanged.GetParentPath());
auto property = modelUSD.GetPropertyAtPath(objectsChanged);
std::string strProperty = property.GetBaseName().GetText();
if (strProperty == "radius")
{
double radius;
auto attribute = modelUSD.GetAttributeAtPath(objectsChanged);
attribute.Get(&radius);
}
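// Accumulate translation, rotation and scale up the prim hierarchy
// (geometry -> *_visual -> *_link -> model) so the pose reported to
// Ignition is expressed relative to the world/model frame.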
if (strProperty == "translate")
{
auto xform = pxr::UsdGeomXformable(modelUSD);
auto transforms = GetOp(xform);
auto currentPrim = modelUSD;
ignition::math::Quaterniond q(
IGN_DTOR(transforms.rotXYZ[0]),
IGN_DTOR(transforms.rotXYZ[1]),
IGN_DTOR(transforms.rotXYZ[2]));
if (currentPrim.GetName() == "geometry")
{
currentPrim = currentPrim.GetParent();
auto visualXform = pxr::UsdGeomXformable(currentPrim);
auto visualOp = GetOp(visualXform);
transforms.position += visualOp.position;
ignition::math::Quaterniond qX, qY, qZ;
ignition::math::Angle angleX(IGN_DTOR(visualOp.rotXYZ[0]));
ignition::math::Angle angleY(IGN_DTOR(visualOp.rotXYZ[1]));
ignition::math::Angle angleZ(IGN_DTOR(visualOp.rotXYZ[2]));
qX = ignition::math::Quaterniond(angleX.Normalized().Radian(), 0, 0);
qY = ignition::math::Quaterniond(0, angleY.Normalized().Radian(), 0);
qZ = ignition::math::Quaterniond(0, 0, angleZ.Normalized().Radian());
q = ((q * qX) * qY) * qZ;
transforms.scale = pxr::GfVec3f(
transforms.scale[0] * visualOp.scale[0],
transforms.scale[1] * visualOp.scale[1],
transforms.scale[2] * visualOp.scale[2]);
}
auto currentPrimName = currentPrim.GetName().GetString();
int substrIndex = currentPrimName.size() - std::string("_visual").size();
if (substrIndex >= 0 && substrIndex < currentPrimName.size())
{
if (currentPrimName.substr(substrIndex).find("_visual") !=
std::string::npos)
{
currentPrim = currentPrim.GetParent();
auto linkXform = pxr::UsdGeomXformable(currentPrim);
auto linkOp = GetOp(linkXform);
transforms.position += linkOp.position;
ignition::math::Quaterniond qX, qY, qZ;
ignition::math::Angle angleX(IGN_DTOR(linkOp.rotXYZ[0]));
ignition::math::Angle angleY(IGN_DTOR(linkOp.rotXYZ[1]));
ignition::math::Angle angleZ(IGN_DTOR(linkOp.rotXYZ[2]));
qX = ignition::math::Quaterniond(angleX.Normalized().Radian(), 0, 0);
qY = ignition::math::Quaterniond(0, angleY.Normalized().Radian(), 0);
qZ = ignition::math::Quaterniond(0, 0, angleZ.Normalized().Radian());
q = ((q * qX) * qY) * qZ;
transforms.scale = pxr::GfVec3f(
transforms.scale[0] * linkOp.scale[0],
transforms.scale[1] * linkOp.scale[1],
transforms.scale[2] * linkOp.scale[2]);
}
}
currentPrimName = currentPrim.GetName().GetString();
substrIndex = currentPrimName.size() - std::string("_link").size();
if (substrIndex >= 0 && substrIndex < currentPrimName.size())
{
if (currentPrimName.substr(substrIndex).find("_link") !=
std::string::npos)
{
currentPrim = currentPrim.GetParent();
auto modelXform = pxr::UsdGeomXformable(currentPrim);
auto modelOp = GetOp(modelXform);
transforms.position += modelOp.position;
ignition::math::Quaterniond qX, qY, qZ;
ignition::math::Angle angleX(IGN_DTOR(modelOp.rotXYZ[0]));
ignition::math::Angle angleY(IGN_DTOR(modelOp.rotXYZ[1]));
ignition::math::Angle angleZ(IGN_DTOR(modelOp.rotXYZ[2]));
qX = ignition::math::Quaterniond(angleX.Normalized().Radian(), 0, 0);
qY = ignition::math::Quaterniond(0, angleY.Normalized().Radian(), 0);
qZ = ignition::math::Quaterniond(0, 0, angleZ.Normalized().Radian());
q = ((q * qX) * qY) * qZ;
transforms.scale = pxr::GfVec3f(
transforms.scale[0] * modelOp.scale[0],
transforms.scale[1] * modelOp.scale[1],
transforms.scale[2] * modelOp.scale[2]);
}
}
std::size_t found = std::string(currentPrim.GetName()).find("_link");
if (found != std::string::npos)
continue;
found = std::string(currentPrim.GetName()).find("_visual");
if (found != std::string::npos)
continue;
auto poseMsg = req.add_pose();
poseMsg->set_name(currentPrim.GetName());
poseMsg->mutable_position()->set_x(transforms.position[0]);
poseMsg->mutable_position()->set_y(transforms.position[1]);
poseMsg->mutable_position()->set_z(transforms.position[2]);
poseMsg->mutable_orientation()->set_x(q.X());
poseMsg->mutable_orientation()->set_y(q.Y());
poseMsg->mutable_orientation()->set_z(q.Z());
poseMsg->mutable_orientation()->set_w(q.W());
}
}
if (req.pose_size() > 0)
{
bool result;
ignition::msgs::Boolean rep;
unsigned int timeout = 100;
bool executed = this->dataPtr->node.Request(
"/world/" + this->dataPtr->worldName + "/set_pose_vector",
req, timeout, rep, result);
if (executed)
{
if (!result)
ignerr << "Service call failed" << std::endl;
}
else
ignerr << "Service [/world/" << this->dataPtr->worldName
<< "/set_pose_vector] call timed out" << std::endl;
}
}
}
} // namespace omniverse
} // namespace ignition
|
gazebosim/gz-omni/source/ignition_live/Joint.hpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef IGNITION_OMNIVERSE_JOINT_HPP
#define IGNITION_OMNIVERSE_JOINT_HPP
#include <ignition/msgs/joint.pb.h>
#include <pxr/usd/usd/prim.h>
#include <pxr/usd/usd/stage.h>
namespace ignition::omniverse
{
pxr::UsdPrim CreateFixedJoint(const std::string& _path,
const pxr::UsdStageRefPtr& _stage);
pxr::UsdPrim CreateRevoluteJoint(const std::string& _path,
const pxr::UsdStageRefPtr& _stage);
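// Usage sketch (the path and stage are illustrative):
//   pxr::UsdPrim joint = CreateRevoluteJoint("/World/model/joint0", stage);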
} // namespace ignition::omniverse
#endif
|
gazebosim/gz-omni/source/ignition_live/Mesh.cpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "Mesh.hpp"
#include <ignition/common/Console.hh>
#include <ignition/common/Mesh.hh>
#include <ignition/common/MeshManager.hh>
#include <ignition/common/SubMesh.hh>
#include <ignition/common/URI.hh>
#include <ignition/common/Util.hh>
#include <pxr/usd/usdGeom/xformCommonAPI.h>
namespace ignition::omniverse
{
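/// \brief Return true if `str` ends with `suffix`.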
bool endsWith(const std::string_view &str, const std::string_view &suffix)
{
return str.size() >= suffix.size() &&
0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
}
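/// \brief Replace dashes with underscores (dashes are not valid in USD
/// prim names).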
inline std::string removeDash(const std::string &_str)
{
std::string result = _str;
std::replace(result.begin(), result.end(), '-', '_');
return result;
}
pxr::UsdGeomMesh UpdateMesh(const ignition::msgs::MeshGeom &_meshMsg,
const std::string &_path,
const pxr::UsdStageRefPtr &_stage)
{
ignition::common::URI uri(_meshMsg.filename());
std::string fullname;
std::string home;
if (!ignition::common::env("HOME", home, false))
{
ignerr << "The HOME environment variable was not defined, "
<< "so the resource [" << fullname << "] could not be found\n";
return pxr::UsdGeomMesh();
}
if (uri.Scheme() == "https" || uri.Scheme() == "http")
{
auto systemPaths = ignition::common::systemPaths();
std::vector<std::string> tokens = ignition::common::split(uri.Path().Str(), "/");
std::string server = tokens[0];
std::string versionServer = tokens[1];
std::string owner = ignition::common::lowercase(tokens[2]);
std::string type = ignition::common::lowercase(tokens[3]);
std::string modelName = ignition::common::lowercase(tokens[4]);
std::string modelVersion = ignition::common::lowercase(tokens[5]);
fullname = ignition::common::joinPaths(
home, ".ignition", "fuel", server, owner, type, modelName, modelVersion);
systemPaths->AddFilePaths(fullname);
for (std::size_t i = 7; i < tokens.size(); i++)
{
fullname = ignition::common::joinPaths(
fullname, ignition::common::lowercase(tokens[i]));
systemPaths->AddFilePaths(fullname);
}
}
else
{
fullname = ignition::common::findFile(_meshMsg.filename());
}
auto ignMesh = ignition::common::MeshManager::Instance()->Load(fullname);
// Some meshes are split into several submeshes. This loop checks whether
// the name of the USD path matches a submesh name; in that case we
// create one USD mesh per submesh.
bool isUSDPathInSubMeshName = false;
for (unsigned int i = 0; i < ignMesh->SubMeshCount(); ++i)
{
auto subMesh = ignMesh->SubMeshByIndex(i).lock();
if (ignMesh->SubMeshCount() != 1)
{
std::string pathLowerCase = ignition::common::lowercase(_path);
std::string subMeshLowerCase =
ignition::common::lowercase(subMesh->Name());
if (pathLowerCase.find(subMeshLowerCase) != std::string::npos)
{
isUSDPathInSubMeshName = true;
break;
}
}
}
for (unsigned int i = 0; i < ignMesh->SubMeshCount(); ++i)
{
pxr::VtArray<pxr::GfVec3f> meshPoints;
pxr::VtArray<pxr::GfVec2f> uvs;
pxr::VtArray<pxr::GfVec3f> normals;
pxr::VtArray<int> faceVertexIndices;
pxr::VtArray<int> faceVertexCounts;
auto subMesh = ignMesh->SubMeshByIndex(i).lock();
if (!subMesh)
{
ignerr << "Unable to get a shared pointer to submesh at index [" << i
<< "] of parent mesh [" << ignMesh->Name() << "]" << std::endl;
return pxr::UsdGeomMesh();
}
if (isUSDPathInSubMeshName)
{
if (ignMesh->SubMeshCount() != 1)
{
std::string pathLowerCase = ignition::common::lowercase(_path);
std::string subMeshLowerCase =
ignition::common::lowercase(subMesh->Name());
if (pathLowerCase.find(subMeshLowerCase) == std::string::npos)
{
continue;
}
}
}
// copy the submesh's vertices to the usd mesh's "points" array
for (unsigned int v = 0; v < subMesh->VertexCount(); ++v)
{
const auto &vertex = subMesh->Vertex(v);
meshPoints.push_back(pxr::GfVec3f(vertex.X(), vertex.Y(), vertex.Z()));
}
// copy the submesh's indices to the usd mesh's "faceVertexIndices" array
for (unsigned int j = 0; j < subMesh->IndexCount(); ++j)
faceVertexIndices.push_back(subMesh->Index(j));
// copy the submesh's texture coordinates
for (unsigned int j = 0; j < subMesh->TexCoordCount(); ++j)
{
const auto &uv = subMesh->TexCoord(j);
uvs.push_back(pxr::GfVec2f(uv[0], 1 - uv[1]));
}
// copy the submesh's normals
for (unsigned int j = 0; j < subMesh->NormalCount(); ++j)
{
const auto &normal = subMesh->Normal(j);
normals.push_back(pxr::GfVec3f(normal[0], normal[1], normal[2]));
}
// set the usd mesh's "faceVertexCounts" array according to
// the submesh primitive type
// TODO(adlarkin) support all primitive types. The computations are more
// involved for LINESTRIPS, TRIFANS, and TRISTRIPS. I will need to spend
// some time deriving what the number of faces for these primitive types
// are, given the number of indices. The "faceVertexCounts" array will
// also not have the same value for every element in the array for these
// more complex primitive types (see the TODO note in the for loop below)
unsigned int verticesPerFace = 0;
unsigned int numFaces = 0;
switch (subMesh->SubMeshPrimitiveType())
{
case ignition::common::SubMesh::PrimitiveType::POINTS:
verticesPerFace = 1;
numFaces = subMesh->IndexCount();
break;
case ignition::common::SubMesh::PrimitiveType::LINES:
verticesPerFace = 2;
numFaces = subMesh->IndexCount() / 2;
break;
case ignition::common::SubMesh::PrimitiveType::TRIANGLES:
verticesPerFace = 3;
numFaces = subMesh->IndexCount() / 3;
break;
case ignition::common::SubMesh::PrimitiveType::LINESTRIPS:
case ignition::common::SubMesh::PrimitiveType::TRIFANS:
case ignition::common::SubMesh::PrimitiveType::TRISTRIPS:
default:
ignerr << "Submesh " << subMesh->Name()
<< " has a primitive type that is not supported." << std::endl;
return pxr::UsdGeomMesh();
}
// TODO(adlarkin) update this loop to allow for varying element
// values in the array (see TODO note above). Right now, the
// array only allows for all elements to have one value, which in
// this case is "verticesPerFace"
for (unsigned int n = 0; n < numFaces; ++n)
faceVertexCounts.push_back(verticesPerFace);
std::string primName = _path + "/" + subMesh->Name();
primName = removeDash(primName);
if (endsWith(primName, "/"))
{
primName.erase(primName.size() - 1);
}
auto usdMesh = pxr::UsdGeomMesh::Define(_stage, pxr::SdfPath(_path));
usdMesh.CreatePointsAttr().Set(meshPoints);
usdMesh.CreateFaceVertexIndicesAttr().Set(faceVertexIndices);
usdMesh.CreateFaceVertexCountsAttr().Set(faceVertexCounts);
auto coordinates = usdMesh.CreatePrimvar(
pxr::TfToken("st"), pxr::SdfValueTypeNames->Float2Array,
pxr::UsdGeomTokens->vertex);
coordinates.Set(uvs);
usdMesh.CreateNormalsAttr().Set(normals);
usdMesh.SetNormalsInterpolation(pxr::TfToken("vertex"));
usdMesh.CreateSubdivisionSchemeAttr(pxr::VtValue(pxr::TfToken("none")));
const auto &meshMin = ignMesh->Min();
const auto &meshMax = ignMesh->Max();
pxr::VtArray<pxr::GfVec3f> extentBounds;
extentBounds.push_back(pxr::GfVec3f(meshMin.X(), meshMin.Y(), meshMin.Z()));
extentBounds.push_back(pxr::GfVec3f(meshMax.X(), meshMax.Y(), meshMax.Z()));
usdMesh.CreateExtentAttr().Set(extentBounds);
// TODO (ahcorde): Material inside the submesh
int materialIndex = subMesh->MaterialIndex();
if (materialIndex != -1)
{
auto material = ignMesh->MaterialByIndex(materialIndex);
// sdf::Material materialSdf = sdf::usd::convert(material);
// auto materialUSD = ParseSdfMaterial(&materialSdf, _stage);
// if(materialSdf.Emissive() != ignition::math::Color(0, 0, 0, 1)
// || materialSdf.Specular() != ignition::math::Color(0, 0, 0, 1)
// || materialSdf.PbrMaterial())
// {
// if (materialUSD)
// {
// pxr::UsdShadeMaterialBindingAPI(usdMesh).Bind(materialUSD);
// }
// }
}
pxr::UsdGeomXformCommonAPI meshXformAPI(usdMesh);
meshXformAPI.SetScale(pxr::GfVec3f(
_meshMsg.scale().x(), _meshMsg.scale().y(), _meshMsg.scale().z()));
return usdMesh;
}
return pxr::UsdGeomMesh();
}
} // namespace ignition::omniverse
|
gazebosim/gz-omni/source/ignition_live/main.cpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "GetOp.hpp"
#include "OmniverseConnect.hpp"
#include "Scene.hpp"
#include "SetOp.hpp"
#include "ThreadSafe.hpp"
#include <ignition/common/Console.hh>
#include <ignition/common/SystemPaths.hh>
#include <ignition/common/StringUtils.hh>
#include <ignition/utils/cli.hh>
#include <pxr/usd/sdf/path.h>
#include <pxr/usd/usd/prim.h>
#include <pxr/usd/usdGeom/xformCommonAPI.h>
#include <string>
using namespace ignition::omniverse;
constexpr double kTargetFps = 60;
constexpr std::chrono::duration<double> kUpdateRate(1 / kTargetFps);
int main(int argc, char* argv[])
{
CLI::App app("Ignition omniverse connector");
std::string destinationPath;
app.add_option("-p,--path", destinationPath,
// clang-format off
"Location of the omniverse stage. e.g. \"omniverse://localhost/Users/ignition/stage.usd\"")
// clang-format on
->required();
std::string worldName;
ignition::omniverse::Simulator simulatorPoses{
ignition::omniverse::Simulator::Ignition};
app.add_option("-w,--world", worldName, "Name of the ignition world")
->required();
std::map<std::string, ignition::omniverse::Simulator> map{
{"ignition", ignition::omniverse::Simulator::Ignition},
{"isaacsim", ignition::omniverse::Simulator::IsaacSim}};
app.add_option("--pose", simulatorPoses, "Which simulator will handle the poses")
->required()
->transform(CLI::CheckedTransformer(map, CLI::ignore_case));
app.add_flag_callback("-v,--verbose",
[]() { ignition::common::Console::SetVerbosity(4); });
CLI11_PARSE(app, argc, argv);
std::string ignGazeboResourcePath;
auto systemPaths = ignition::common::systemPaths();
ignition::common::env("IGN_GAZEBO_RESOURCE_PATH", ignGazeboResourcePath);
for (const auto& resourcePath :
ignition::common::Split(ignGazeboResourcePath, ':'))
{
systemPaths->AddFilePaths(resourcePath);
}
// Connect with omniverse
if (!StartOmniverse())
{
ignerr << "Not able to start Omniverse" << std::endl;
return -1;
}
// Open the USD model in Omniverse
const std::string stageUrl = [&]()
{
auto result = CreateOmniverseModel(destinationPath);
if (!result)
{
ignerr << result.Error() << std::endl;
exit(-1);
}
return result.Value();
}();
omniUsdLiveSetModeForUrl(stageUrl.c_str(),
OmniUsdLiveMode::eOmniUsdLiveModeEnabled);
PrintConnectedUsername(stageUrl);
Scene scene(worldName, stageUrl, simulatorPoses);
if (!scene.Init())
{
return -1;
}
auto lastUpdate = std::chrono::steady_clock::now();
// don't spam the console, show the fps only once a sec
auto nextShowFps =
lastUpdate.time_since_epoch() + std::chrono::duration<double>(1);
while (true)
{
std::this_thread::sleep_for((lastUpdate + kUpdateRate) -
std::chrono::steady_clock::now());
auto now = std::chrono::steady_clock::now();
if (now.time_since_epoch() > nextShowFps)
{
double curFps =
1 / std::chrono::duration<double>(now - lastUpdate).count();
nextShowFps = now.time_since_epoch() + std::chrono::duration<double>(1);
igndbg << "fps: " << curFps << std::endl;
}
lastUpdate = now;
scene.Save();
omniUsdLiveProcess();
}
return 0;
}
|
gazebosim/gz-omni/source/ignition_live/OmniClientpp.hpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* C++ wrappers for various omniclient apis
*/
#ifndef IGNITION_OMNIVERSE_OMNICLIENTPP_HPP
#define IGNITION_OMNIVERSE_OMNICLIENTPP_HPP
#include "Error.hpp"
#include <OmniClient.h>
#include <ostream>
#include <string>
namespace ignition::omniverse
{
/// \brief RAII wrapper to omniClientLock and omniClientUnlock
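/// A minimal usage sketch (the URL is illustrative):
///   {
///     OmniverseLock lock("omniverse://localhost/Users/ignition/stage.usd");
///     // ... modify the locked resource ...
///   }  // unlocked when `lock` goes out of scope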
class OmniverseLock
{
public:
OmniverseLock(const std::string& _url);
~OmniverseLock();
OmniverseLock(const OmniverseLock&) = delete;
OmniverseLock(OmniverseLock&&) = delete;
OmniverseLock& operator=(const OmniverseLock&) = delete;
private:
const std::string url;
};
/// \brief Synchronous API for omniverse
class OmniverseSync
{
public:
template <typename T>
using MaybeError = ignition::omniverse::MaybeError<T, OmniClientResult>;
static MaybeError<OmniClientListEntry> Stat(const std::string& url) noexcept;
};
} // namespace ignition::omniverse
#endif
|
gazebosim/gz-omni/source/ignition_live/FUSDLayerNoticeListener.hpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef IGNITION_OMNIVERSE_FUSDLAYERNOTICELISTENER_HPP
#define IGNITION_OMNIVERSE_FUSDLAYERNOTICELISTENER_HPP
#include "Scene.hpp"
#include "ThreadSafe.hpp"
#include <pxr/usd/usd/stage.h>
#include <ignition/common/Console.hh>
#include <ignition/utils/ImplPtr.hh>
namespace ignition
{
namespace omniverse
{
class FUSDLayerNoticeListener : public pxr::TfWeakBase
{
public:
FUSDLayerNoticeListener(
std::shared_ptr<ThreadSafe<pxr::UsdStageRefPtr>> &_stage,
const std::string& _worldName);
void HandleGlobalLayerReload(const pxr::SdfNotice::LayerDidReloadContent& n);
// Print some interesting info about the LayerNotice
void HandleRootOrSubLayerChange(
const class pxr::SdfNotice::LayersDidChangeSentPerLayer& _layerNotice,
const pxr::TfWeakPtr<pxr::SdfLayer>& _sender);
/// \internal
/// \brief Private data pointer
IGN_UTILS_UNIQUE_IMPL_PTR(dataPtr)
};
} // namespace omniverse
} // namespace ignition
#endif
|
gazebosim/gz-omni/source/ignition_live/Joint.cpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "Joint.hpp"
namespace ignition::omniverse
{
pxr::UsdPrim CreateFixedJoint(const std::string& _path,
const pxr::UsdStageRefPtr& _stage)
{
pxr::TfToken usdPrimTypeName("PhysicsFixedJoint");
return _stage->DefinePrim(pxr::SdfPath(_path), usdPrimTypeName);
}
pxr::UsdPrim CreateRevoluteJoint(const std::string& _path,
const pxr::UsdStageRefPtr& _stage)
{
pxr::TfToken usdPrimTypeName("PhysicsRevoluteJoint");
return _stage->DefinePrim(pxr::SdfPath(_path), usdPrimTypeName);
}
}  // namespace ignition::omniverse
|
gazebosim/gz-omni/source/ignition_live/FUSDNoticeListener.hpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef IGNITION_OMNIVERSE_FUSDNOTICELISTENER_HPP
#define IGNITION_OMNIVERSE_FUSDNOTICELISTENER_HPP
#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>
#include "ThreadSafe.hpp"
#include "Scene.hpp"
#include <ignition/utils/ImplPtr.hh>
#include <pxr/usd/usd/notice.h>
namespace ignition
{
namespace omniverse
{
class FUSDNoticeListener : public pxr::TfWeakBase
{
public:
FUSDNoticeListener(
std::shared_ptr<ThreadSafe<pxr::UsdStageRefPtr>> &_stage,
const std::string &_worldName,
Simulator _simulatorPoses,
std::unordered_map<std::string, uint32_t> &entitiesByName);
void Handle(const class pxr::UsdNotice::ObjectsChanged &ObjectsChanged);
/// \internal
/// \brief Private data pointer
IGN_UTILS_UNIQUE_IMPL_PTR(dataPtr)
};
} // namespace omniverse
} // namespace ignition
#endif
|
NVlabs/ACID/README.md | [![NVIDIA Source Code License](https://img.shields.io/badge/license-NSCL-blue.svg)](https://github.com/NVlabs/ACID/blob/master/LICENSE)
![Python 3.7](https://img.shields.io/badge/python-3.7-green.svg)
# ACID: Action-Conditional Implicit Visual Dynamics for Deformable Object Manipulation
### [Project Page](https://b0ku1.github.io/acid/) | [Paper](https://arxiv.org/abs/2203.06856)
<div style="text-align: center">
<img src="_media/model_figure.png" width="600"/>
</div>
This repository contains the codebase used in [**ACID: Action-Conditional Implicit Visual Dynamics for Deformable Object Manipulation**](https://b0ku1.github.io/acid/), which will appear in [RSS 2022](https://roboticsconference.org/program/papers/) and is nominated for Best Student Paper Award. Specifically, the repo contains code for:
* [**PlushSim**](./PlushSim/), the simulation environment used to generate all manipulation data.
* [**ACID model**](./ACID/), the implicit visual dynamics model and its training code.
If you find our code or paper useful, please consider citing
```bibtex
@article{shen2022acid,
title={ACID: Action-Conditional Implicit Visual Dynamics for Deformable Object Manipulation},
author={Shen, Bokui and Jiang, Zhenyu and Choy, Christopher and J. Guibas, Leonidas and Savarese, Silvio and Anandkumar, Anima and Zhu, Yuke},
journal={Robotics: Science and Systems (RSS)},
year={2022}
}
```
# ACID model
Please see the [README](./ACID/README.md) for more detailed information.
# PlushSim
Please see the [README](./PlushSim/README.md) for more detailed information.
# License
Please check the [LICENSE](./LICENSE) file. ACID may be used non-commercially, meaning for research or evaluation purposes only. For business inquiries, please contact [email protected].
|
NVlabs/ACID/licenses/oss/convonet-LICENSE.txt | MIT License
Copyright (c) 2020 Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, Andreas Geiger
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
NVlabs/ACID/PlushSim/all_animals.txt | teddy/teddy_aug_6/teddy_aug_6.usda
teddy/teddy_aug_11/teddy_aug_11.usda
teddy/teddy_aug_4/teddy_aug_4.usda
teddy/teddy_aug_5/teddy_aug_5.usda
teddy/teddy_aug_1/teddy_aug_1.usda
teddy/teddy_aug_10/teddy_aug_10.usda
teddy/teddy_aug_2/teddy_aug_2.usda
teddy/teddy_aug_3/teddy_aug_3.usda
teddy/teddy_scaled/teddy_scaled.usda
teddy/teddy_aug_9/teddy_aug_9.usda
teddy/teddy_aug_0/teddy_aug_0.usda
teddy/teddy_aug_8/teddy_aug_8.usda
teddy/teddy_aug_7/teddy_aug_7.usda
dog/dog_aug_7/dog_aug_7.usda
dog/dog_aug_0/dog_aug_0.usda
dog/dog_aug_8/dog_aug_8.usda
dog/dog_aug_4/dog_aug_4.usda
dog/dog_scaled/dog_scaled.usda
dog/dog_aug_11/dog_aug_11.usda
dog/dog_aug_1/dog_aug_1.usda
dog/dog_aug_6/dog_aug_6.usda
dog/dog_aug_9/dog_aug_9.usda
dog/dog_aug_3/dog_aug_3.usda
dog/dog_aug_2/dog_aug_2.usda
dog/dog_aug_5/dog_aug_5.usda
dog/dog_aug_10/dog_aug_10.usda
snake/snake_aug_1/snake_aug_1.usda
snake/snake_scaled/snake_scaled.usda
snake/snake_aug_5/snake_aug_5.usda
snake/snake_aug_4/snake_aug_4.usda
snake/snake_aug_0/snake_aug_0.usda
snake/snake_aug_9/snake_aug_9.usda
snake/snake_aug_10/snake_aug_10.usda
snake/snake_aug_7/snake_aug_7.usda
snake/snake_aug_6/snake_aug_6.usda
snake/snake_aug_11/snake_aug_11.usda
snake/snake_aug_2/snake_aug_2.usda
snake/snake_aug_8/snake_aug_8.usda
snake/snake_aug_3/snake_aug_3.usda
octopus/octopus_scaled/octopus_scaled.usda
octopus/octopus_aug_0/octopus_aug_0.usda
octopus/octopus_aug_11/octopus_aug_11.usda
octopus/octopus_aug_3/octopus_aug_3.usda
octopus/octopus_aug_1/octopus_aug_1.usda
octopus/octopus_aug_6/octopus_aug_6.usda
octopus/octopus_aug_7/octopus_aug_7.usda
octopus/octopus_aug_2/octopus_aug_2.usda
octopus/octopus_aug_10/octopus_aug_10.usda
octopus/octopus_aug_9/octopus_aug_9.usda
octopus/octopus_aug_5/octopus_aug_5.usda
octopus/octopus_aug_8/octopus_aug_8.usda
octopus/octopus_aug_4/octopus_aug_4.usda
rabbit/rabbit_aug_6/rabbit_aug_6.usda
rabbit/rabbit_aug_8/rabbit_aug_8.usda
rabbit/rabbit_aug_7/rabbit_aug_7.usda
rabbit/rabbit_aug_1/rabbit_aug_1.usda
rabbit/rabbit_aug_4/rabbit_aug_4.usda
rabbit/rabbit_aug_10/rabbit_aug_10.usda
rabbit/rabbit_aug_2/rabbit_aug_2.usda
rabbit/rabbit_aug_11/rabbit_aug_11.usda
rabbit/rabbit_aug_9/rabbit_aug_9.usda
rabbit/rabbit_aug_3/rabbit_aug_3.usda
rabbit/rabbit_aug_0/rabbit_aug_0.usda
rabbit/rabbit_aug_5/rabbit_aug_5.usda
rabbit/rabbit_scaled/rabbit_scaled.usda
elephant/elephant_aug_3/elephant_aug_3.usda
elephant/elephant_aug_11/elephant_aug_11.usda
elephant/elephant_scaled/elephant_scaled.usda
elephant/elephant_aug_1/elephant_aug_1.usda
elephant/elephant_aug_10/elephant_aug_10.usda
elephant/elephant_aug_5/elephant_aug_5.usda
elephant/elephant_aug_4/elephant_aug_4.usda
elephant/elephant_aug_7/elephant_aug_7.usda
elephant/elephant_aug_9/elephant_aug_9.usda
elephant/elephant_aug_8/elephant_aug_8.usda
elephant/elephant_aug_0/elephant_aug_0.usda
elephant/elephant_aug_6/elephant_aug_6.usda
elephant/elephant_aug_2/elephant_aug_2.usda
|
NVlabs/ACID/PlushSim/README.md | [![NVIDIA Source Code License](https://img.shields.io/badge/license-NSCL-blue.svg)](https://github.com/NVlabs/ACID/blob/master/LICENSE)
![Python 3.7](https://img.shields.io/badge/python-3.7-green.svg)
# PlushSim
<div style="text-align: center">
<img src="../_media/plushsim.png" width="600"/>
</div>
Our PlushSim simulation environment is based on [Omniverse Kit](https://docs.omniverse.nvidia.com/prod_kit/prod_kit.html). This codebase contains the docker image and the code to simulate and manipulate deformable objects.
## Prerequisites
Omniverse Kit has a set of hardware requirements. Specifically, it requires an RTX GPU (e.g. RTX 2080, RTX 30x0, Titan RTX). Also, 16GB+ of memory is recommended.
The codebase is tested on Linux Ubuntu 20.04.
## Getting the Docker Image
First, you need to install [Docker](https://docs.docker.com/engine/install/ubuntu/) and [NVIDIA Container Toolkit](https://github.com/NVIDIA/nvidia-docker) before proceeding.
After you have installed Docker and the NVIDIA Container Toolkit, you can obtain the PlushSim Docker image from DockerHub with the command:
```
docker pull b0ku1/acid-docker:cleaned
```
## Preparing Simulation Assets
You can download the simulation assets `raw_assets.zip` at: [Google Drive](https://drive.google.com/file/d/1OO8Wi0PHF3ROmW8088JNOMJn4EcDLDPB/view?usp=sharing).
After you download it, unzip the assets within this directory. You should have a folder structure like:
```
PlushSim/
assets/
animals/
...
attic_clean/
...
```
## Generating Manipulation Trajectories
Generating manipulation data consists of two steps:
1. Start the Docker image, mounting the correct directory.
2. Run the data generation script.
To start the docker image with an interactive session, run the following command inside `PlushSim/`:
```
export PLUSHSIM_ROOT=$(pwd)
docker run -it -v $PLUSHSIM_ROOT:/result --gpus all b0ku1/acid-docker:cleaned bash
```
After entering the interactive session, you can run the following command to start generating manipulation trajectories:
```
./python.sh /result/scripts/data_gen_attic.py
```
The above script will generate sample interaction sequences in `PlushSim/interaction_sequence`. There are various command line arguments that you can give to `data_gen_attic.py`; please see the documentation in the Python script.
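For example, the following invocation (flag names and defaults are taken from the argument parser in `scripts/data_gen_attic.py`; the asset path is illustrative) generates 18 interactions with the teddy asset, resetting the scene every 6 interactions:
```
./python.sh /result/scripts/data_gen_attic.py \
    --save_dir /result/interaction_sequence \
    --plush_path animals/teddy/teddy_scaled/teddy_scaled.usda \
    --num_interaction 18 \
    --reset_every 6
```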
## Visualizing the assets in GUI
To visualize the assets in Omniverse GUI, you need to download and install [Omniverse](https://docs.omniverse.nvidia.com/prod_install-guide/prod_install-guide.html). The link contains NVIDIA's official instruction for installation.
After you install Omniverse, you can open the `.usda` files in the assets folder. To run PlushSim's scripts outside of Docker (e.g. with your native Omniverse installation), you can find more information at [Omniverse Kit's Python Manual](https://docs.omniverse.nvidia.com/py/kit/index.html). For questions regarding Omniverse usage, please visit [NVIDIA developer forum](https://forums.developer.nvidia.com/c/omniverse/300).
## License
Please check the [LICENSE](../LICENSE) file. ACID may be used non-commercially, meaning for research or evaluation purposes only. For business inquiries, please contact [email protected].
If you find our code or paper useful, please consider citing
```bibtex
@article{shen2022acid,
title={ACID: Action-Conditional Implicit Visual Dynamics for Deformable Object Manipulation},
author={Shen, Bokui and Jiang, Zhenyu and Choy, Christopher and J. Guibas, Leonidas and Savarese, Silvio and Anandkumar, Anima and Zhu, Yuke},
journal={Robotics: Science and Systems (RSS)},
year={2022}
}
``` |
NVlabs/ACID/PlushSim/scripts/python_app.py | #!/usr/bin/env python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import carb
import omni.kit.app
import omni.kit
import os
import sys
import time
import asyncio
import argparse
DEFAULT_CONFIG = {
"width": 1024,
"height": 800,
"renderer": "PathTracing", # Can also be RayTracedLighting
"anti_aliasing": 3, # 3 for dlss, 2 for fxaa, 1 for taa, 0 to disable aa
"samples_per_pixel_per_frame": 64,
"denoiser": True,
"subdiv_refinement_level": 0,
"headless": True,
"max_bounces": 4,
"max_specular_transmission_bounces": 6,
"max_volume_bounces": 4,
"sync_loads": False,
"experience": f'{os.environ["EXP_PATH"]}/omni.bloky.python.kit',
}
class OmniKitHelper:
"""Helper class for launching OmniKit from a Python environment.
Launches and configures OmniKit and exposes useful functions.
Typical usage example:
.. highlight:: python
.. code-block:: python
config = {'width': 800, 'height': 600, 'renderer': 'PathTracing'}
kit = OmniKitHelper(config) # Start omniverse kit
# <Code to generate or load a scene>
kit.update() # Render a single frame"""
def __init__(self, config=DEFAULT_CONFIG):
"""The config variable is a dictionary containing the following entries
Args:
width (int): Width of the viewport and generated images. Defaults to 1024
height (int): Height of the viewport and generated images. Defaults to 800
renderer (str): Rendering mode, can be `RayTracedLighting` or `PathTracing`. Defaults to `PathTracing`
samples_per_pixel_per_frame (int): The number of samples to render per frame, used for `PathTracing` only. Defaults to 64
denoiser (bool): Enable this to use AI denoising to improve image quality. Defaults to True
subdiv_refinement_level (int): Number of subdivisons to perform on supported geometry. Defaults to 0
headless (bool): Disable UI when running. Defaults to True
max_bounces (int): Maximum number of bounces, used for `PathTracing` only. Defaults to 4
max_specular_transmission_bounces(int): Maximum number of bounces for specular or transmission, used for `PathTracing` only. Defaults to 6
max_volume_bounces(int): Maximum number of bounces for volumetric, used for `PathTracing` only. Defaults to 4
sync_loads (bool): When enabled, will pause rendering until all assets are loaded. Defaults to False
experience (str): The config json used to launch the application.
"""
# only import custom loop runner if we create this object
# from omni.kit.loop import _loop
# initialize vars
self._exiting = False
self._is_dirty_instance_mappings = True
self._previous_physics_dt = 1.0 / 60.0
self.config = DEFAULT_CONFIG
if config is not None:
self.config.update(config)
# Load app plugin
self._framework = carb.get_framework()
print(os.environ["CARB_APP_PATH"])
self._framework.load_plugins(
loaded_file_wildcards=["omni.kit.app.plugin"],
search_paths=[os.path.abspath(f'{os.environ["CARB_APP_PATH"]}/kit/plugins')],
)
print(DEFAULT_CONFIG)
# launch kit
self.last_update_t = time.time()
self.app = omni.kit.app.get_app()
self.kit_settings = None
self._start_app()
self.carb_settings = carb.settings.acquire_settings_interface()
self.setup_renderer(mode="default") # set rtx-defaults settings
self.setup_renderer(mode="non-default") # set rtx settings
self.timeline = omni.timeline.get_timeline_interface()
# Wait for new stage to open
new_stage_task = asyncio.ensure_future(omni.usd.get_context().new_stage_async())
print("OmniKitHelper Starting up ...")
while not new_stage_task.done():
time.sleep(0.001) # This sleep prevents a deadlock in certain cases
self.update()
self.update()
# Dock windows if they exist
main_dockspace = omni.ui.Workspace.get_window("DockSpace")
def dock_window(space, name, location):
window = omni.ui.Workspace.get_window(name)
if window and space:
window.dock_in(space, location)
return window
view = dock_window(main_dockspace, "Viewport", omni.ui.DockPosition.TOP)
self.update()
console = dock_window(view, "Console", omni.ui.DockPosition.BOTTOM)
prop = dock_window(view, "Property", omni.ui.DockPosition.RIGHT)
dock_window(view, "Main ToolBar", omni.ui.DockPosition.LEFT)
self.update()
dock_window(prop, "Render Settings", omni.ui.DockPosition.SAME)
self.update()
print("OmniKitHelper Startup Complete")
def _start_app(self):
args = [
os.path.abspath(__file__),
f'{self.config["experience"]}',
"--/persistent/app/viewport/displayOptions=0", # hide extra stuff in viewport
# Forces kit to not render until all USD files are loaded
f'--/rtx/materialDb/syncLoads={self.config["sync_loads"]}',
f'--/rtx/hydra/materialSyncLoads={self.config["sync_loads"]}',
f'--/omni.kit.plugin/syncUsdLoads={self.config["sync_loads"]}',
"--/app/content/emptyStageOnStart=False", # This is required due to an infinite loop but results in errors on launch
"--/app/hydraEngine/waitIdle=True",
"--/app/asyncRendering=False",
f'--/app/renderer/resolution/width={self.config["width"]}',
f'--/app/renderer/resolution/height={self.config["height"]}',
]
args.append(f"--portable")
args.append(f"--no-window")
args.append(f"--allow-root")
print(args)
self.app.startup("kit", f'{os.environ["CARB_APP_PATH"]}/kit', args)
def __del__(self):
if self._exiting is False and sys.meta_path is None:
print(
"\033[91m"
+ "ERROR: Python exiting while OmniKitHelper was still running, Please call shutdown() on the OmniKitHelper object to exit cleanly"
+ "\033[0m"
)
def shutdown(self):
self._exiting = True
print("Shutting Down OmniKitHelper...")
# We are exiting but something is still loading, wait for it to load to avoid a deadlock
if self.is_loading():
print(" Waiting for USD resource operations to complete (this may take a few seconds)")
while self.is_loading():
self.app.update()
self.app.shutdown()
self._framework.unload_all_plugins()
print("Shutting Down Complete")
def get_stage(self):
"""Returns the current USD stage."""
return omni.usd.get_context().get_stage()
def set_setting(self, setting, value):
"""Convenience function to set settings.
Args:
setting (str): string representing the setting being changed
value: new value for the setting being changed, the type of this value must match its respective setting
"""
if isinstance(value, str):
self.carb_settings.set_string(setting, value)
elif isinstance(value, bool):
self.carb_settings.set_bool(setting, value)
elif isinstance(value, int):
self.carb_settings.set_int(setting, value)
elif isinstance(value, float):
self.carb_settings.set_float(setting, value)
else:
raise ValueError(f"Value of type {type(value)} is not supported.")
def set_physics_dt(self, physics_dt: float = 1.0 / 150.0, physics_substeps: int = 1):
"""Specify the physics step size to use when simulating, default is 1/60.
Note that a physics scene has to be in the stage for this to do anything
Args:
physics_dt (float): Use this value for physics step
"""
if self.get_stage() is None:
return
if physics_dt == self._previous_physics_dt:
return
if physics_substeps is None or physics_substeps <= 1:
physics_substeps = 1
self._previous_physics_dt = physics_dt
from pxr import UsdPhysics, PhysxSchema
steps_per_second = int(1.0 / physics_dt)
min_steps = int(steps_per_second / physics_substeps)
physxSceneAPI = None
for prim in self.get_stage().Traverse():
if prim.IsA(UsdPhysics.Scene):
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Apply(prim)
if physxSceneAPI is not None:
physxSceneAPI.GetTimeStepsPerSecondAttr().Set(steps_per_second)
settings = carb.settings.get_settings()
settings.set_int("persistent/simulation/minFrameRate", min_steps)
def update(self, dt=0.0, physics_dt=None, physics_substeps=None):
"""Render one frame. Optionally specify dt in seconds, specify None to use wallclock.
Specify physics_dt and physics_substeps to decouple the physics step size from rendering
For example: to render with a dt of 1/30 and simulate physics at 1/120 use:
- dt = 1/30.0
- physics_dt = 1/120.0
- physics_substeps = 4
Args:
dt (float): The step size used for the overall update, set to None to use wallclock
physics_dt (float, optional): If specified use this value for physics step
physics_substeps (int, optional): Maximum number of physics substeps to perform
"""
# dont update if exit was called
if self._exiting:
return
# a physics dt was specified and is > 0
if physics_dt is not None and physics_dt > 0.0:
self.set_physics_dt(physics_dt, physics_substeps)
# a dt was specified and is > 0
if dt is not None and dt > 0.0:
# if physics dt was not specified, use rendering dt
if physics_dt is None:
self.set_physics_dt(dt)
# self.loop_runner.set_runner_dt(dt)
self.app.update()
else:
# dt not specified, run in realtime
time_now = time.time()
dt = time_now - self.last_update_t
if physics_dt is None:
self.set_physics_dt(1.0 / 60.0, 4)
self.last_update_t = time_now
# self.loop_runner.set_runner_dt(dt)
self.app.update()
def play(self):
"""Starts the editor physics simulation"""
self.update()
self.timeline.play()
self.update()
def pause(self):
"""Pauses the editor physics simulation"""
self.update()
self.timeline.pause()
self.update()
def stop(self):
"""Stops the editor physics simulation"""
self.update()
self.timeline.stop()
self.update()
def get_status(self):
"""Get the status of the renderer to see if anything is loading"""
return omni.usd.get_context().get_stage_loading_status()
def is_loading(self):
"""convenience function to see if any files are being loaded
Returns:
bool: True if loading, False otherwise
"""
message, loaded, loading = self.get_status()
return loading > 0
def is_exiting(self):
"""get current exit status for this object
Returns:
bool: True if exit() was called previously, False otherwise
"""
return self._exiting
def execute(self, *args, **kwargs):
"""Allow use of omni.kit.commands interface"""
omni.kit.commands.execute(*args, **kwargs)
def setup_renderer(self, mode="non-default"):
"""Reset render settings to those in config. This should be used in case a new stage is opened and the desired config needs to be re-applied"""
rtx_mode = "/rtx-defaults" if mode == "default" else "/rtx"
self.set_setting(rtx_mode + "/rendermode", self.config["renderer"])
# Raytrace mode settings
self.set_setting(rtx_mode + "/post/aa/op", self.config["anti_aliasing"])
self.set_setting(rtx_mode + "/directLighting/sampledLighting/enabled", True)
# self.set_setting(rtx_mode + "/ambientOcclusion/enabled", True)
# Pathtrace mode settings
self.set_setting(rtx_mode + "/pathtracing/spp", self.config["samples_per_pixel_per_frame"])
self.set_setting(rtx_mode + "/pathtracing/totalSpp", self.config["samples_per_pixel_per_frame"])
self.set_setting(rtx_mode + "/pathtracing/clampSpp", self.config["samples_per_pixel_per_frame"])
self.set_setting(rtx_mode + "/pathtracing/maxBounces", self.config["max_bounces"])
self.set_setting(
rtx_mode + "/pathtracing/maxSpecularAndTransmissionBounces",
self.config["max_specular_transmission_bounces"],
)
self.set_setting(rtx_mode + "/pathtracing/maxVolumeBounces", self.config["max_volume_bounces"])
self.set_setting(rtx_mode + "/pathtracing/optixDenoiser/enabled", self.config["denoiser"])
self.set_setting(rtx_mode + "/hydra/subdivision/refinementLevel", self.config["subdiv_refinement_level"])
# Experimental, forces kit to not render until all USD files are loaded
self.set_setting(rtx_mode + "/materialDb/syncLoads", self.config["sync_loads"])
self.set_setting(rtx_mode + "/hydra/materialSyncLoads", self.config["sync_loads"])
self.set_setting("/omni.kit.plugin/syncUsdLoads", self.config["sync_loads"])
def create_prim(
self, path, prim_type, translation=None, rotation=None, scale=None, ref=None, semantic_label=None, attributes={}
):
"""Create a prim, apply specified transforms, apply semantic label and
set specified attributes.
Args:
path (str): The path of the new prim.
prim_type (str): Prim type name
translation (tuple(float, float, float), optional): prim translation (applied last)
rotation (tuple(float, float, float), optional): prim rotation in radians with rotation
order ZYX.
scale (tuple(float, float, float), optional): scaling factor in x, y, z.
ref (str, optional): Path to the USD that this prim will reference.
semantic_label (str, optional): Semantic label.
attributes (dict, optional): Key-value pairs of prim attributes to set.
"""
from pxr import UsdGeom, Semantics
prim = self.get_stage().DefinePrim(path, prim_type)
for k, v in attributes.items():
prim.GetAttribute(k).Set(v)
xform_api = UsdGeom.XformCommonAPI(prim)
if ref:
prim.GetReferences().AddReference(ref)
if semantic_label:
sem = Semantics.SemanticsAPI.Apply(prim, "Semantics")
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set("class")
sem.GetSemanticDataAttr().Set(semantic_label)
if rotation:
xform_api.SetRotate(rotation, UsdGeom.XformCommonAPI.RotationOrderXYZ)
if scale:
xform_api.SetScale(scale)
if translation:
xform_api.SetTranslate(translation)
return prim
def set_up_axis(self, axis):
"""Change the up axis of the current stage
Args:
axis: valid values are `UsdGeom.Tokens.y`, or `UsdGeom.Tokens.z`
"""
from pxr import UsdGeom, Usd
stage = self.get_stage()
rootLayer = stage.GetRootLayer()
rootLayer.SetPermissionToEdit(True)
with Usd.EditContext(stage, rootLayer):
UsdGeom.SetStageUpAxis(stage, axis)
|
NVlabs/ACID/PlushSim/scripts/data_gen_attic.py | #!/usr/bin/env python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import time
import argparse
import json
from utils import *
parser = argparse.ArgumentParser("Dataset generation")
################################################################
# save to args
parser.add_argument("--save_dir", type=str, default="/result/interaction_sequence")
parser.add_argument("--img_subdir", type=str, default='img')
parser.add_argument("--geom_subdir", type=str, default='geom')
parser.add_argument("--info_subdir", type=str, default='info')
parser.add_argument("--save_every", type=int, default=25)
################################################################
# interaction args
parser.add_argument("--num_interaction", type=int, default=18)
parser.add_argument("--reset_every", type=int, default=6)
################################################################
# scene args
parser.add_argument("--asset_root", type=str, default="/result/assets")
parser.add_argument("--scene_path", type=str, default="attic_lean/Attic_clean_v2.usda")
parser.add_argument("--plush_path", type=str, default="animals/teddy/teddy_scaled/teddy_scaled.usda")
parser.add_argument("--skip_layout_randomization", action="store_true", default=False)
parser.add_argument("--skip_lights_randomization", action="store_true", default=False)
args = parser.parse_args()
os.makedirs(args.save_dir, exist_ok=True)
os.makedirs(os.path.join(args.save_dir, args.img_subdir), exist_ok=True)
os.makedirs(os.path.join(args.save_dir, args.geom_subdir), exist_ok=True)
os.makedirs(os.path.join(args.save_dir, args.info_subdir), exist_ok=True)
img_dir = os.path.join(args.save_dir, args.img_subdir)
geom_dir = os.path.join(args.save_dir, args.geom_subdir)
info_dir = os.path.join(args.save_dir, args.info_subdir)
def main():
from attic_scene import attic_scene
scene_path = os.path.join(args.asset_root, args.scene_path)
plush_path = os.path.join(args.asset_root, args.plush_path)
scene = attic_scene(
scene_path,
plush_path,
RESET_STATIC=True,
RAND_LAYOUT=not args.skip_layout_randomization,
RAND_LIGHTS=not args.skip_lights_randomization,)
start_time = time.time()
# save scene overall info
with open(os.path.join(info_dir, "scene_meta.json"), 'w') as fp:
json.dump(scene.get_scene_metadata(), fp)
# number of resets
num_resets = (args.num_interaction + args.reset_every - 1) // args.reset_every
for reset in range(num_resets):
# save scene reset collider info
np.savez_compressed(os.path.join(info_dir, f"clutter_info_{reset:04d}.npz"), **scene.get_scene_background_state())
num_steps = min(args.num_interaction, (reset + 1) * args.reset_every) - reset * args.reset_every
# sample interactions
actions = {
'grasp_points':[],
'target_points':[],
'grasp_pixels':[],
'start_frames':[],
'release_frames':[],
'static_frames':[], }
# save start frame
save_frame(f"{reset:04d}_{scene.frame:06d}", scene.get_observations(), img_dir)
np.savez_compressed(
os.path.join(geom_dir, f"{reset:04d}_{scene.frame:06d}.npz"),
**scene.get_scene_state_plush(convert_to=np.float16))
for interaction in range(num_steps):
# stop simulating
scene.kit.pause()
action = scene.sample_action()
if action is None:
scene.kit.play()
continue
grasp_point, target_point, grasp_pixel = action
actions['grasp_points'].append(np.array(grasp_point,np.float16))
actions['target_points'].append(np.array(target_point,np.float16))
actions['grasp_pixels'].append(np.array(grasp_pixel,np.uint16))
actions['start_frames'].append(np.array(scene.frame,np.uint16))
save_frame(f"{reset:04d}_{scene.frame:06d}", scene.get_observations(), img_dir)
np.savez_compressed(
os.path.join(geom_dir, f"{reset:04d}_{scene.frame:06d}.npz"),
**scene.get_scene_state_plush(convert_to=np.float16))
scene.kit.play()
init_traj = scene.gripper.plan_trajectory(scene.gripper.eef_default_loc, grasp_point)
# move
for pos in init_traj:
scene.step()
scene.gripper.set_translation(tuple(pos))
if scene.frame % args.save_every == args.save_every - 1:
save_frame(f"{reset:04d}_{scene.frame:06d}", scene.get_observations(), img_dir)
np.savez_compressed(
os.path.join(geom_dir, f"{reset:04d}_{scene.frame:06d}.npz"),
**scene.get_scene_state_plush(convert_to=np.float16))
scene.kit.pause()
#init_move_traj = scene.gripper.set_translation(grasp_point)
scene.gripper.grasp(scene.plush)
scene.kit.play()
traj = scene.gripper.plan_trajectory(grasp_point, target_point)
# move
for pos in traj:
scene.step()
scene.gripper.set_translation(tuple(pos))
if scene.frame % args.save_every == args.save_every - 1:
save_frame(f"{reset:04d}_{scene.frame:06d}", scene.get_observations(), img_dir)
np.savez_compressed(
os.path.join(geom_dir, f"{reset:04d}_{scene.frame:06d}.npz"),
**scene.get_scene_state_plush(convert_to=np.float16))
# wait until stable
for ff in range(scene.FALL_MAX):
scene.step()
if scene.check_scene_static():
print(f"grasp reaching a resting state after {ff} steps")
break
save_frame(f"{reset:04d}_{scene.frame:06d}", scene.get_observations(), img_dir)
np.savez_compressed(
os.path.join(geom_dir, f"{reset:04d}_{scene.frame:06d}.npz"),
**scene.get_scene_state_plush(convert_to=np.float16))
actions['release_frames'].append(np.array(scene.frame,np.uint16))
# release
scene.kit.pause()
scene.gripper.ungrasp()
# TODO: delete gripper collider
scene.kit.play()
for ff in range(scene.FALL_MAX+scene.DROP_MIN):
scene.step()
if scene.frame % args.save_every == args.save_every - 1:
save_frame(f"{reset:04d}_{scene.frame:06d}", scene.get_observations(), img_dir)
np.savez_compressed(
os.path.join(geom_dir, f"{reset:04d}_{scene.frame:06d}.npz"),
**scene.get_scene_state_plush(convert_to=np.float16))
if ff < scene.DROP_MIN:
continue
if scene.check_scene_static():
print(f"release reaching a resting state after {ff} steps")
break
scene.gripper.reset_translation()
save_frame(f"{reset:04d}_{scene.frame:06d}", scene.get_observations(), img_dir)
np.savez_compressed(
os.path.join(geom_dir, f"{reset:04d}_{scene.frame:06d}.npz"),
**scene.get_scene_state_plush(convert_to=np.float16))
actions['static_frames'].append(np.array(scene.frame,np.uint16))
np.savez_compressed(os.path.join(info_dir, f"interaction_info_{reset:04d}.npz"), **actions)
end_time = time.time()
from datetime import timedelta
time_str = str(timedelta(seconds=end_time - start_time))
print(f'Sampling {num_steps} interactions takes: {time_str}')
scene.reset()
# cleanup
scene.kit.shutdown()
if __name__ == "__main__":
main()
|
NVlabs/ACID/PlushSim/scripts/syntheticdata.py | #!/usr/bin/env python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Helper class for obtaining groundtruth data from OmniKit.
Support provided for RGB, Depth, Bounding Box (2D Tight, 2D Loose, 3D),
segmentation (instance and semantic), and camera parameters.
Typical usage example:
kit = OmniKitHelper() # Start omniverse kit
sd_helper = SyntheticDataHelper()
gt = sd_helper.get_groundtruth(['rgb', 'depth', 'boundingBox2DTight'], viewport)
"""
import math
import carb
import omni
import time
from pxr import UsdGeom, Semantics, Gf
import numpy as np
class SyntheticDataHelper:
def __init__(self):
self.app = omni.kit.app.get_app_interface()
ext_manager = self.app.get_extension_manager()
ext_manager.set_extension_enabled("omni.syntheticdata", True)
from omni.syntheticdata import sensors, helpers
import omni.syntheticdata._syntheticdata as sd # Must be imported after getting app interface
self.sd = sd
self.sd_interface = self.sd.acquire_syntheticdata_interface()
self.viewport = omni.kit.viewport.get_viewport_interface()
self.carb_settings = carb.settings.acquire_settings_interface()
self.sensor_helper_lib = sensors
self.generic_helper_lib = helpers
mode = "numpy"
self.sensor_helpers = {
"rgb": sensors.get_rgb,
"depth": sensors.get_depth_linear,
"depthLinear": self.get_depth_linear,
"instanceSegmentation": sensors.get_instance_segmentation,
"semanticSegmentation": self.get_semantic_segmentation,
"boundingBox2DTight": sensors.get_bounding_box_2d_tight,
"boundingBox2DLoose": sensors.get_bounding_box_2d_loose,
"boundingBox3D": sensors.get_bounding_box_3d,
"camera": self.get_camera_params,
"pose": self.get_pose,
}
self.sensor_types = {
"rgb": self.sd.SensorType.Rgb,
"depth": self.sd.SensorType.DepthLinear,
"depthLinear": self.sd.SensorType.DepthLinear,
"instanceSegmentation": self.sd.SensorType.InstanceSegmentation,
"semanticSegmentation": self.sd.SensorType.SemanticSegmentation,
"boundingBox2DTight": self.sd.SensorType.BoundingBox2DTight,
"boundingBox2DLoose": self.sd.SensorType.BoundingBox2DLoose,
"boundingBox3D": self.sd.SensorType.BoundingBox3D,
}
self.sensor_state = {s: False for s in list(self.sensor_helpers.keys())}
def get_depth_linear(self, viewport):
""" Get Depth Linear sensor output.
Args:
viewport (omni.kit.viewport._viewport.IViewportWindow): Viewport from which to retrieve/create sensor.
Return:
(numpy.ndarray): A float32 array of shape (height, width, 1).
"""
sensor = self.sensor_helper_lib.create_or_retrieve_sensor(viewport, self.sd.SensorType.DepthLinear)
data = self.sd_interface.get_sensor_host_float_texture_array(sensor)
h, w = data.shape[:2]
return np.frombuffer(data, np.float32).reshape(h, w, -1)
def get_semantic_segmentation(self, viewport):
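"""Collapse the instance segmentation into a semantic mask.
Note: this assumes (for these single-plush scenes) that every mapped
instance belongs to the plush object, so mapped instance ids collapse
to label 1 and the background stays 0.
"""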
instance_data, instance_mappings = self.sensor_helpers['instanceSegmentation'](viewport, return_mapping=True)
ins_to_sem = np.zeros(np.max(instance_data)+1,dtype=np.uint8)
for im in instance_mappings[::-1]:
for i in im["instanceIds"]:
if i >= len(ins_to_sem):
continue
ins_to_sem[i] = 1 #if im['semanticLabel'] == 'teddy' else 2
return np.take(ins_to_sem, instance_data)
def get_camera_params(self, viewport):
"""Get active camera intrinsic and extrinsic parameters.
Returns:
A dict of the active camera's parameters.
pose (numpy.ndarray): camera position in world coordinates,
fov (float): horizontal field of view in radians
focal_length (float)
horizontal_aperture (float)
view_projection_matrix (numpy.ndarray(dtype=float64, shape=(4, 4)))
resolution (dict): resolution as a dict with 'width' and 'height'.
clipping_range (tuple(float, float)): Near and Far clipping values.
"""
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath(viewport.get_active_camera())
prim_tf = UsdGeom.Camera(prim).GetLocalTransformation()
focal_length = prim.GetAttribute("focalLength").Get()
horiz_aperture = prim.GetAttribute("horizontalAperture").Get()
fov = 2 * math.atan(horiz_aperture / (2 * focal_length))
x_min, y_min, x_max, y_max = viewport.get_viewport_rect()
width, height = x_max - x_min, y_max - y_min
aspect_ratio = width / height
near, far = prim.GetAttribute("clippingRange").Get()
view_proj_mat = self.generic_helper_lib.get_view_proj_mat(prim, aspect_ratio, near, far)
return {
"pose": np.array(prim_tf),
"fov": fov,
"focal_length": focal_length,
"horizontal_aperture": horiz_aperture,
"view_projection_matrix": view_proj_mat,
"resolution": {"width": width, "height": height},
"clipping_range": (near, far),
}
def get_pose(self):
"""Get pose of all objects with a semantic label.
"""
stage = omni.usd.get_context().get_stage()
mappings = self.generic_helper_lib.get_instance_mappings()
pose = []
for m in mappings:
prim_path = m[0]
prim = stage.GetPrimAtPath(prim_path)
prim_tf = UsdGeom.Xformable(prim).ComputeLocalToWorldTransform(0.0)
pose.append((str(prim_path), m[1], str(m[2]), np.array(prim_tf)))
return pose
async def initialize_async(self, viewport, sensor_types, timeout=10):
""" Initialize sensors in the list provided.
Args:
viewport (omni.kit.viewport._viewport.IViewportWindow): Viewport from which to retrieve/create sensor.
sensor_types (list of omni.syntheticdata._syntheticdata.SensorType): List of sensor types to initialize.
timeout (int): Maximum time in seconds to attempt to initialize sensors.
"""
start = time.time()
is_initialized = False
while not is_initialized and time.time() < (start + timeout):
sensors = []
for sensor_type in sensor_types:
sensors.append(self.sensor_helper_lib.create_or_retrieve_sensor(viewport, sensor_type))
await omni.kit.app.get_app_interface().next_update_async()
is_initialized = all(self.sd_interface.is_sensor_initialized(s) for s in sensors)
if not is_initialized:
uninitialized = [s for s in sensors if not self.sd_interface.is_sensor_initialized(s)]
raise TimeoutError(f"Unable to initialize sensors: [{uninitialized}] within {timeout} seconds.")
await omni.kit.app.get_app_interface().next_update_async() # Extra frame required to prevent access violation error
def get_groundtruth(self, gt_sensors, viewport, verify_sensor_init=True):
"""Get groundtruth from specified gt_sensors.
Args:
gt_sensors (list): List of strings of sensor names. Valid sensors names: rgb, depth,
instanceSegmentation, semanticSegmentation, boundingBox2DTight,
boundingBox2DLoose, boundingBox3D, camera
viewport (omni.kit.viewport._viewport.IViewportWindow): Viewport from which to retrieve/create sensor.
verify_sensor_init (bool): Additional check to verify creation and initialization of sensors.
Returns:
Dict of sensor outputs
"""
if isinstance(gt_sensors, str):
gt_sensors = (gt_sensors,)
# Create and initialize sensors
while verify_sensor_init:
flag = 0
# Render frame
self.app.update()
for sensor_name in gt_sensors:
if sensor_name != "camera" and sensor_name != "pose":
current_sensor = self.sensor_helper_lib.create_or_retrieve_sensor(
viewport, self.sensor_types[sensor_name]
)
if not self.sd_interface.is_sensor_initialized(current_sensor):
flag = 1
# Render frame
self.app.update()
self.app.update()
if flag == 0:
break
gt = {}
sensor_state = {}
# Process non-RT-only sensors
for sensor in gt_sensors:
if sensor not in ["camera", "pose"]:
if sensor == "instanceSegmentation":
gt[sensor] = self.sensor_helpers[sensor](viewport, parsed=True, return_mapping=True)
elif sensor == "boundingBox3D":
gt[sensor] = self.sensor_helpers[sensor](viewport, parsed=True, return_corners=True)
else:
gt[sensor] = self.sensor_helpers[sensor](viewport)
current_sensor = self.sensor_helper_lib.create_or_retrieve_sensor(viewport, self.sensor_types[sensor])
current_sensor_state = self.sd_interface.is_sensor_initialized(current_sensor)
sensor_state[sensor] = current_sensor_state
else:
gt[sensor] = self.sensor_helpers[sensor](viewport)
gt["state"] = sensor_state
return gt
|
NVlabs/ACID/PlushSim/scripts/attic_scene.py | import os
import cv2
import time
import random
import asyncio
import numpy as np
from python_app import OmniKitHelper
import omni
import carb
from utils import *
RESOLUTION=720
# specify a custom config
CUSTOM_CONFIG = {
"width": RESOLUTION,
"height": RESOLUTION,
"anti_aliasing": 3, # 3 for dlss, 2 for fxaa, 1 for taa, 0 to disable aa
"renderer": "RayTracedLighting",
"samples_per_pixel_per_frame": 128,
"max_bounces": 10,
"max_specular_transmission_bounces": 6,
"max_volume_bounces": 4,
"subdiv_refinement_level": 2,
"headless": True,
"sync_loads": True,
"experience": f'{os.environ["EXP_PATH"]}/omni.bloky.kit',
}
"""
plush animal material: /Root/physics/stuff_animal
magic gripper: /Root/physics/magic_gripper
real object group: /Root/physics/real_objects
magic object group: /Root/physics/magic_objects
"""
class attic_scene(object):
def __init__(self,
SCENE_PATH,
PLUSH_ANIMAL_PATH,
PLUSH_SCALE=4,
FALL_MAX=300,
REST_THRESHOLD=8,
PHYSX_DT=1/150.,
SAVE_EVERY=25,
DROP_MIN=20,
RESET_STATIC=True,
RAND_LAYOUT=True,
RAND_LIGHTS=True,
ROBOT_SPEED=1.):
for k,v in locals().items():
if k != 'self':
self.__dict__[k] = v
self.plush_animal_mat = "/Root/physics/stuff_animal"
self.magic_gripper = "/Root/physics/magic_gripper"
self.fingerL = "/Root/physics/magic_gripper/fingerL"
self.fingerR = "/Root/physics/magic_gripper/fingerR"
self.real_object_group = "/Root/physics/real_objects"
self.magic_object_group = "/Root/physics/magic_objects"
self.front_path = "/Root/scene_front"
self.back_path = "/Root/scene_back"
self.scene_range = np.array([[-50*12,-50*8,0],[50*12,50*8,50*8]])
self.drop_range = np.array([[-50*self.PLUSH_SCALE,-50*self.PLUSH_SCALE,],
[50*self.PLUSH_SCALE,50*self.PLUSH_SCALE,]]) #/ 2.
self.back_clutter_range = np.array([[-50*12,50*8,],[50*12,50*12,]])
self.total_range = np.array([[-50*12,-50*12,0],[50*12,50*12,50*8]])
self.kit = OmniKitHelper(CUSTOM_CONFIG)
self.kit.set_physics_dt(physics_dt=self.PHYSX_DT)
physx_interface = omni.physx.get_physx_interface()
physx_interface.force_load_physics_from_usd()
physx_interface.reset_simulation()
async def load_stage(path):
await omni.usd.get_context().open_stage_async(path)
setup_task = asyncio.ensure_future(load_stage(SCENE_PATH))
while not setup_task.done():
self.kit.update()
self.kit.setup_renderer()
self.kit.update()
self.stage = omni.usd.get_context().get_stage()
self.front_group = self.stage.GetPrimAtPath(self.front_path)
self.back_group = self.stage.GetPrimAtPath(self.back_path)
from syntheticdata import SyntheticDataHelper
self.sd_helper = SyntheticDataHelper()
# force RayTracedLighting mode for better performance while simulating physics
self.kit.set_setting("/rtx/rendermode", "RayTracedLighting")
# wait until all materials are loaded
print("waiting for things to load...")
# if self.kit.is_loading():
# time.sleep(10)
while self.kit.is_loading():
time.sleep(0.1)
# set up cameras
self._setup_cameras()
_viewport_api = omni.kit.viewport.get_viewport_interface()
viewport = _viewport_api.get_instance_list()[0]
self._viewport = _viewport_api.get_viewport_window(viewport)
# touch the sensors to kick in anti-aliasing
for _ in range(20):
_ = self.sd_helper.get_groundtruth(
[ "rgb","depth","instanceSegmentation","semanticSegmentation",], self._viewport)
# set up objects
self._import_plush_animal(PLUSH_ANIMAL_PATH)
self._setup_robots()
# # start off Omniverse
self.kit.play()
# store original sim and vis points for reset
self.sim_og_pts, self.vis_og_pts = self._get_plush_points()
# # stop Omniverse
# self.kit.pause()
# reset the scene
self.frame = 0
self.reset()
def step(self):
self.kit.update(self.PHYSX_DT)
self.frame += 1
return self.frame
def sample_action(self, grasp_point=None):
if grasp_point is None:
gt = self.sd_helper.get_groundtruth(
[ "rgb","depth","instanceSegmentation","semanticSegmentation",], self._viewport)
pts = get_partial_point_cloud(self._viewport, project_factor=100.)
semseg = gt['semanticSegmentation']
kernel = np.ones((2,2), np.uint8)
semseg = cv2.erode(semseg, kernel, iterations=1)
plush_pts = np.where(semseg == 1)
if len(plush_pts[0]) == 0:
return None
idx = random.randint(0,len(plush_pts[0])-1)
grasp_pixel = (plush_pts[0][idx], plush_pts[1][idx])
grasp_point = tuple(pts[grasp_pixel[0], grasp_pixel[1],:])
else:
grasp_pixel = None
target_point = self._sample_displacement_vector(grasp_point)
if target_point is None:
return None
return grasp_point, target_point, grasp_pixel
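# Illustrative driver loop (a sketch, not part of the scene class; assumes the
# stage assets referenced in __init__ exist):
#   scene = attic_scene(SCENE_PATH, PLUSH_ANIMAL_PATH)
#   action = scene.sample_action()
#   if action is not None:
#       grasp_point, target_point, grasp_pixel = action
#       # drive the magic gripper from grasp_point to target_point,
#       # calling scene.step() once per waypoint
#   obs = scene.get_observations(partial_pointcloud=True)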
def reset(self):
self.kit.stop()
from pxr import Gf
self.frame = 0
print("Reseting plush geometry...")
self._reset_plush_geometry(self.sim_og_pts, self.vis_og_pts)
print("Finished reseting plush geometry...")
# randonly drop the plush into the scene
print("Reseting plush translation...")
self.plush_translateOp.Set(Gf.Vec3f((0.,0.,250.)))
print("Reseting plush rotation...")
def randrot():
return random.random() * 360.
rotx,roty,rotz = randrot(), randrot(), randrot()
self.plush_rotationOp.Set(rpy2quat(rotx,roty,rotz))
print("Finished reseting plush pose...")
print("Reseting scene...")
self._randomize_scene()
print("Finished reseting scene...")
self.kit.play()
# wait until stable
if self.RESET_STATIC:
print("Waiting to reach stable...")
for _ in range(self.DROP_MIN):
self.step()
for ff in range(self.FALL_MAX*6):
self.step()
if self.check_scene_static():
print(f"Initial configuration becomes static after {ff} steps")
break
print("Reset Finished")
self.frame = 0
def reset_to(self, state):
self.kit.stop()
loc = state['loc']
rot = state['rot']
sim = state['sim']
vis = state['vis']
self._reset_plush_geometry(sim, vis)
self.plush_translateOp.Set(loc)
self.plush_rotationOp.Set(rot)
self.kit.play()
def check_scene_static(self):
_,_,_,v = self._get_object_velocity_stats()
return v < self.REST_THRESHOLD
def get_scene_metadata(self):
from pxr import PhysxSchema
sbAPI = PhysxSchema.PhysxDeformableAPI(self.plush)
faces = sbAPI.GetSimulationIndicesAttr().Get()
return {'plush_path': self.PLUSH_ANIMAL_PATH,
'sim_faces':np.array(faces, int).tolist(),
'sim_pts':np.array(self.sim_og_pts, np.float16).tolist(),
'vis_pts':np.array(self.vis_og_pts, np.float16).tolist(),
'scene_range': self.scene_range.tolist(),
'back_clutter_range': self.back_clutter_range.tolist(),
'cam_info': self._get_camera_info()}
# background state is different per reset
def get_scene_background_state(self):
collider = {}
for p in find_immediate_children(self.front_group):
name = str(p.GetPath()).split("/")[-1]
e,f = find_collider(p)
collider[f"{name}_box"] = e
collider[f"{name}_tran"] = f
for p in find_immediate_children(self.back_group):
name = str(p.GetPath()).split("/")[-1]
e,f = find_collider(p)
collider[f"{name}_box"] = e
collider[f"{name}_tran"] = f
return collider
def get_scene_state_plush(self,raw=False,convert_to=None):
sim,vis = self._get_plush_points()
loc,rot,scale = self._get_plush_loc(),self._get_plush_rot(),self._get_plush_scale()
if not raw:
loc,rot,scale = tuple(loc),eval(str(rot)),tuple(scale)
state = {'sim':sim, 'vis':vis,
'loc':loc, 'rot':rot, 'scale':scale}
if convert_to is not None:
for k,v in state.items():
state[k] = np.array(v, convert_to)
return state
def get_observations(self,
sensors=["rgb","depth",
# "instanceSegmentation",
"semanticSegmentation",],
partial_pointcloud=False):
frame = self.sd_helper.get_groundtruth(sensors, self._viewport)
gt = {}
gt['rgb_img'] = frame['rgb'][:,:,:-1]
gt['seg_img'] = frame['semanticSegmentation']
gt['dep_img'] = frame['depth'].squeeze()
if partial_pointcloud:
gt['pxyz'] = get_partial_point_cloud(self._viewport, project_factor=100.)
return gt
################################################################
#
# Below are "private" functions ;)
#
################################################################
def _import_plush_animal(self, usda_path):
from omni.physx.scripts import physicsUtils
mesh_name = usda_path.split('/')[-1].split('.')[0]
from pxr import PhysxSchema, UsdGeom, UsdShade, Semantics, Gf
###################
# import object
abspath = carb.tokens.get_tokens_interface().resolve(usda_path)
physics_root = "/Root"
assert self.stage.DefinePrim(physics_root+f"/{mesh_name}").GetReferences().AddReference(abspath)
self.mesh_path = f"{physics_root}/{mesh_name}/{mesh_name}_obj/mesh"
self.plush= self.stage.GetPrimAtPath(self.mesh_path)
###################
# add deformable property
schema_parameters = {
"self_collision": True,
"vertex_velocity_damping": 0.005,
"sleep_damping": 10,
"sleep_threshold": 5,
"settling_threshold": 11,
"solver_position_iteration_count": 60,
"collisionRestOffset": 0.1,
"collisionContactOffset": 0.5,
"voxel_resolution": 45,
}
skin_mesh = UsdGeom.Mesh.Get(self.stage, self.mesh_path)
skin_mesh.AddTranslateOp().Set(Gf.Vec3f(0.0, 0.0, 300.0))
skin_mesh.AddOrientOp().Set(Gf.Quatf(0.707, 0.707, 0, 0))
skin_points = skin_mesh.GetPointsAttr().Get()
skin_indices = physicsUtils.triangulateMesh(skin_mesh)
# Create tet meshes for simulation and collision based on the skin mesh
simulation_resolution = schema_parameters["voxel_resolution"]
skin_mesh_scale = Gf.Vec3f(1.0, 1.0, 1.0)
collision_points, collision_indices = physicsUtils.create_conforming_tetrahedral_mesh(skin_points, skin_indices)
simulation_points, simulation_indices = physicsUtils.create_voxel_tetrahedral_mesh(collision_points, collision_indices, skin_mesh_scale, simulation_resolution)
# Apply PhysxDeformableBodyAPI and PhysxCollisionAPI to skin mesh and set parameter and tet meshes
deformable_body_api = PhysxSchema.PhysxDeformableBodyAPI.Apply(skin_mesh.GetPrim())
deformable_body_api.CreateSolverPositionIterationCountAttr().Set(schema_parameters['solver_position_iteration_count'])
deformable_body_api.CreateSelfCollisionAttr().Set(schema_parameters['self_collision'])
deformable_body_api.CreateCollisionIndicesAttr().Set(collision_indices)
deformable_body_api.CreateCollisionRestPointsAttr().Set(collision_points)
deformable_body_api.CreateSimulationIndicesAttr().Set(simulation_indices)
deformable_body_api.CreateSimulationRestPointsAttr().Set(simulation_points)
deformable_body_api.CreateVertexVelocityDampingAttr().Set(schema_parameters['vertex_velocity_damping'])
deformable_body_api.CreateSleepDampingAttr().Set(schema_parameters['sleep_damping'])
deformable_body_api.CreateSleepThresholdAttr().Set(schema_parameters['sleep_threshold'])
deformable_body_api.CreateSettlingThresholdAttr().Set(schema_parameters['settling_threshold'])
PhysxSchema.PhysxCollisionAPI.Apply(skin_mesh.GetPrim())
###################
# add deformable material
def add_physics_material_to_prim(stage, prim, materialPath):
bindingAPI = UsdShade.MaterialBindingAPI.Apply(prim)
materialPrim = UsdShade.Material(stage.GetPrimAtPath(materialPath))
bindingAPI.Bind(materialPrim, UsdShade.Tokens.weakerThanDescendants, "physics")
add_physics_material_to_prim(self.stage, self.plush, self.plush_animal_mat)
###################
# add collision group
physicsUtils.add_collision_to_collision_group(self.stage, self.mesh_path, self.real_object_group)
###################
# add semantic info
sem = Semantics.SemanticsAPI.Apply(self.stage.GetPrimAtPath(self.mesh_path), "Semantics")
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set("class")
sem.GetSemanticDataAttr().Set("plush")
###################
# standardize transform
physicsUtils.setup_transform_as_scale_orient_translate(self.plush)
xform = UsdGeom.Xformable(self.plush)
ops = xform.GetOrderedXformOps()
self.plush_translateOp = ops[0]
self.plush_rotationOp = ops[1]
self.plush_scaleOp = ops[2]
scale_factor = self.PLUSH_SCALE
self.plush_scaleOp.Set((scale_factor,scale_factor,scale_factor))
def _get_object_velocity_stats(self):
from pxr import PhysxSchema
sbAPI = PhysxSchema.PhysxDeformableAPI(self.plush)
velocity = np.array(sbAPI.GetSimulationVelocitiesAttr().Get())
vnorm = np.linalg.norm(velocity, axis=1)
return np.percentile(vnorm, [0,50,90,99])
def _setup_robots(self):
actor = self.stage.GetPrimAtPath(self.magic_gripper)
fingerL = self.stage.GetPrimAtPath(self.fingerL)
fingerR = self.stage.GetPrimAtPath(self.fingerR)
self.gripper = magic_eef(actor,
self.stage,
eef_default_loc=(0.,0.,600.),
default_speed=self.ROBOT_SPEED,
fingerL=fingerL,
fingerR=fingerR)
def _setup_cameras(self):
from pxr import UsdGeom
stage = omni.usd.get_context().get_stage()
# Need to set this before setting viewport window size
carb.settings.acquire_settings_interface().set_int("/app/renderer/resolution/width", -1)
carb.settings.acquire_settings_interface().set_int("/app/renderer/resolution/height", -1)
viewport_window = omni.kit.viewport.get_default_viewport_window()
viewport_window.set_active_camera("/Root/cam_light/Camera")
viewport_window.set_texture_resolution(RESOLUTION,RESOLUTION)
viewport_window.set_window_size(RESOLUTION, RESOLUTION)
def _get_plush_loc(self):
return self.plush_translateOp.Get()
def _get_plush_rot(self):
return self.plush_rotationOp.Get()
def _get_plush_scale(self):
return self.plush_scaleOp.Get()
def _get_plush_points(self):
from pxr import PhysxSchema
sbAPI = PhysxSchema.PhysxDeformableBodyAPI(self.plush)
sim = sbAPI.GetSimulationPointsAttr().Get()
mesh = UsdGeom.Mesh(self.plush)
vis = mesh.GetPointsAttr().Get()
return sim, vis
def _get_camera_info(self):
cam_info = {}
camera_pose, camera_intr = get_camera_params(self._viewport)
cam_name = get_camera_name(self._viewport)
cam_info[cam_name] = [camera_pose.tolist(), camera_intr.tolist()]
return cam_info
def _randomize_collection(self, collection_prim, scene_range, drop_range=None, rand_rot=True, padding=True):
extents,objs = [],[]
for p in find_immediate_children(collection_prim):
objs.append(str(p.GetPath()))
extent, transform = find_collider(p)
extents.append(transform_verts(extent, transform))
objects = [standardize_bbox(bbox) for bbox in np.array(extents)[:,:,:-1]]
canvas = get_canvas(scene_range)
if drop_range is not None:
fill_canvas(canvas, scene_range, drop_range)
translations = []
for b,n in zip(objects,objs):
for _ in range(3):
t = sample_bbox_translation(b, scene_range)
if padding:
tb = scale(pad_to_square(b + t))
else:
tb = b + t
if not overlaps_with_current(canvas, scene_range, tb):
fill_canvas(canvas, scene_range, tb)
translations.append((n,t))
break
if len(translations) == 0 or translations[-1][0] != n:
translations.append((n,np.array([0,-2000])))
def randrot():
return random.random() * 360.
from pxr import UsdGeom
from omni.physx.scripts import physicsUtils
for n,t in translations:
xform = UsdGeom.Xformable(self.stage.GetPrimAtPath(n))
physicsUtils.setup_transform_as_scale_orient_translate(xform)
ops = xform.GetOrderedXformOps()
translateOp = ops[0]
translateOp.Set(tuple(np.array(tuple(translateOp.Get())) + np.append(t, 0)))
if rand_rot:
orientOp = ops[1]
orientOp.Set(rpy2quat(0,0,randrot()))
def _randomize_lighting(self):
domelight = self.stage.GetPrimAtPath("/Root/cam_light/Lights/DomeLight")
light = self.stage.GetPrimAtPath("/Root/cam_light/Lights/DistantLight")
light1 = self.stage.GetPrimAtPath("/Root/cam_light/Lights/DistantLight_01")
temp = np.random.rand(1)[0] * 5000 + 2500
domelight.GetAttribute('colorTemperature').Set(temp)
light.GetAttribute('colorTemperature').Set(temp)
light1.GetAttribute('colorTemperature').Set(temp)
int_range = 10000
int_min = 2500
for l in [domelight, light, light1]:
intensity = np.random.rand(1)[0] * int_range + int_min
l.GetAttribute('intensity').Set(intensity)
def _randomize_scene(self):
if self.RAND_LAYOUT:
# randomize front scene
self._randomize_collection(self.front_group, self.scene_range[:,:-1], self.drop_range)
# randomize back scene
self._randomize_collection(self.back_group, self.back_clutter_range,rand_rot=False, padding=False)
if self.RAND_LIGHTS:
# randomize lights
self._randomize_lighting()
def _get_2d_layout_occupancy_map(self):
extents = []
for p in find_immediate_children(self.front_group):
extent, transform = find_collider(p)
extents.append(transform_verts(extent, transform))
for p in find_immediate_children(self.back_group):
extent, transform = find_collider(p)
extents.append(transform_verts(extent, transform))
objects = [standardize_bbox(bbox) for bbox in np.array(extents)[:,:,:-1]]
#canvas = get_canvas(self.scene_range[:,:-1])
canvas = get_canvas(self.total_range[:,:-1])
for b in objects:
fill_canvas(canvas, self.total_range[:,:-1], b)
return canvas
def _sample_displacement_vector(self, grasp_point):
sampled_for = 0
mean_len = 160
std_len = 80
max_len = 240
min_len = 80
canvas = self._get_2d_layout_occupancy_map()
while True:
sampled_for = sampled_for + 1
move_len = np.clip(np.random.normal(loc=mean_len,scale=std_len), min_len, max_len)
move_dir = sample_direction_zup(100).squeeze()
#move_dir[1,:] = np.abs(move_dir[1,:])
move_vec = move_dir * move_len
target_pts = grasp_point + move_vec.T
in_world = np.logical_and(
target_pts > self.total_range[0],
target_pts < self.total_range[1]).all(axis=1)
occupancies = []
try:
# ensure no obstacle lies along the path, sampling waypoints up to 1.3x max_len
for i in range(int(max_len*1.3)):
temp = grasp_point + (target_pts - grasp_point) / max_len * i
# clamp each interpolated waypoint to the scene bounds before the occupancy lookup
temp[:,0] = np.clip(temp[:,0], self.total_range[0,0], self.total_range[1,0])
temp[:,1] = np.clip(temp[:,1], self.total_range[0,1], self.total_range[1,1])
occupancies.append(get_occupancy_value(
canvas, self.total_range[:,:-1], temp[:,:-1]))
path_no_collision = (np.array(occupancies) == 0).all(axis=0)
viable = np.logical_and(in_world, path_no_collision)
in_idx = np.nonzero(viable)[0]
except IndexError:  # waypoint fell outside the occupancy canvas; resample
continue
if len(in_idx) > 0:
target_point = target_pts[np.random.choice(in_idx)]
return target_point
else:
if sampled_for > 10:
break
return None
def _reset_plush_geometry(self, sim, vis):
from pxr import PhysxSchema, Gf, Vt
# reset simulation points
sbAPI = PhysxSchema.PhysxDeformableBodyAPI(self.plush)
sbAPI.GetSimulationPointsAttr().Set(sim)
# reset simulation points velocity
sbAPI = PhysxSchema.PhysxDeformableAPI(self.plush)
velocity = np.array(sbAPI.GetSimulationVelocitiesAttr().Get())
zero_velocity = np.zeros_like(velocity)
velocity_vec = Vt.Vec3fArray([Gf.Vec3f(tuple(m)) for m in zero_velocity])
sbAPI.GetSimulationVelocitiesAttr().Set(velocity_vec)
# reset visual points
mesh = UsdGeom.Mesh(self.plush)
mesh.GetPointsAttr().Set(vis) |
NVlabs/ACID/PlushSim/scripts/utils.py | import os
import math
import omni
import numpy as np
from PIL import Image
from pxr import UsdGeom, Usd, UsdPhysics, Gf
import matplotlib.pyplot as plt
################################################################
# State Saving Utils
# (Geometry)
################################################################
def transform_points_cam_to_world(cam_pts, camera_pose):
world_pts = np.transpose(
np.dot(camera_pose[0:3, 0:3], np.transpose(cam_pts)) + np.tile(camera_pose[0:3, 3:], (1, cam_pts.shape[0])))
return world_pts
def project_depth_world_space(depth_image, camera_intr, camera_pose, project_factor=1.):
cam_pts = project_depth_cam_space(depth_image, camera_intr, keep_dim=False, project_factor=project_factor)
world_pts = transform_points_cam_to_world(cam_pts, camera_pose)
W, H = depth_image.shape
pts = world_pts.reshape([W, H, 3])
return pts
def project_depth_cam_space(depth_img, camera_intrinsics, keep_dim=True, project_factor=1.):
# Get depth image size
im_h = depth_img.shape[0]
im_w = depth_img.shape[1]
# Project depth into 3D point cloud in camera coordinates
pix_x, pix_y = np.meshgrid(np.linspace(0, im_w - 1, im_w), np.linspace(0, im_h - 1, im_h))
cam_pts_x = np.multiply(pix_x - im_w / 2., -depth_img / camera_intrinsics[0, 0])
cam_pts_y = np.multiply(pix_y - im_h / 2., depth_img / camera_intrinsics[1, 1])
cam_pts_z = depth_img.copy()
cam_pts_x.shape = (im_h * im_w, 1)
cam_pts_y.shape = (im_h * im_w, 1)
cam_pts_z.shape = (im_h * im_w, 1)
cam_pts = np.concatenate((cam_pts_x, cam_pts_y, cam_pts_z), axis=1) * project_factor
# print("cam_pts: ", cam_pts.max(axis=0), cam_pts.min(axis=0))
if keep_dim:
cam_pts = cam_pts.reshape([im_h, im_w, 3])
return cam_pts
def get_camera_params(viewport):
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath(viewport.get_active_camera())
prim_tf = np.array(UsdGeom.Camera(prim).GetLocalTransformation())
focal_length = prim.GetAttribute("focalLength").Get()
horiz_aperture = prim.GetAttribute("horizontalAperture").Get()
fov = 2 * math.atan(horiz_aperture / (2 * focal_length))
image_w, image_h = viewport.get_texture_resolution()
camera_focal_length = (float(image_w) / 2) / np.tan(fov/ 2)
cam_intr = np.array(
[[camera_focal_length, 0, float(image_h) / 2],
[0, camera_focal_length, float(image_w) / 2],
[0, 0, 1]])
return prim_tf.T, cam_intr
def get_partial_point_cloud(viewport, in_world_space=True, project_factor=1.):
from omni.syntheticdata import sensors
data = sensors.get_depth_linear(viewport)
h, w = data.shape[:2]
depth_data = -np.frombuffer(data, np.float32).reshape(h, w, -1)
camera_pose, camera_intr = get_camera_params(viewport)
if in_world_space:
return project_depth_world_space(depth_data.squeeze(), camera_intr, camera_pose, project_factor=project_factor)
else:
return project_depth_cam_space(depth_data.squeeze(), camera_intr, project_factor=project_factor)
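# Example (a sketch; assumes an active viewport whose depth sensor has been
# touched at least once, as in attic_scene): back-project the current depth
# buffer and keep only the points on the plush via the semantic mask:
#   pts = get_partial_point_cloud(viewport, project_factor=100.)  # H x W x 3
#   plush_xyz = pts[semseg == 1]                                  # N x 3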
def export_visual_mesh(prim, export_path, loc=None, rot=None, binarize=True):
assert prim.IsA(UsdGeom.Mesh), "prim needs to be a UsdGeom.Mesh"
mesh = UsdGeom.Mesh(prim)
points = mesh.GetPointsAttr().Get()
if binarize:
path = os.path.splitext(export_path)[0]+'.npy'
np.save(path, np.array(points, np.float16))
else:
print(export_path)
faces = np.array(mesh.GetFaceVertexIndicesAttr().Get()).reshape(-1,3) + 1
uv = mesh.GetPrimvar("st").Get()
with open(export_path, "w") as fp:
fp.write("mtllib teddy.mtl\nusemtl Material.004\n")
for x,y,z in points:
fp.write(f"v {x:.3f} {y:.3f} {z:.3f}\n")
for u,v in uv:
fp.write(f"vt {u:=.4f} {v:.4f}\n")
for i, (x,y,z) in enumerate(faces):
fp.write(f"f {x}/{i*3+1} {y}/{i*3+2} {z}/{i*3+3}\n")
def get_sim_points(prim, loc=None, rot=None):
from pxr import PhysxSchema
sbAPI = PhysxSchema.PhysxDeformableBodyAPI(prim)
points = sbAPI.GetSimulationPointsAttr().Get()
if rot is not None:
points = np.array(points)
w,x,y,z = eval(str(rot))
from scipy.spatial.transform import Rotation
rot = Rotation.from_quat(np.array([x,y,z,w]))
points = rot.apply(points)
if loc is not None:
loc = np.array(tuple(loc))
points = points + loc
return points
def get_sim_faces(prim):
from pxr import PhysxSchema
sbAPI = PhysxSchema.PhysxDeformableAPI(prim)
faces = sbAPI.GetSimulationIndicesAttr().Get()
return faces
def export_simulation_voxels(prim, export_path, binarize=True, export_faces=False):
points = get_sim_points(prim)
if export_faces:
faces = get_sim_faces(prim)
if binarize:
path = os.path.splitext(export_path)[0]+'.npy'
if export_faces:
np.savez(path, points=np.array(points, np.float16), faces=np.array(faces, int))
else:
np.save(path, np.array(points, np.float16))
else:
with open(export_path, 'w') as fp:
for p in points:
fp.write(f"v {p[0]:.3f} {p[1]:.3f} {p[2]:.3f}\n")
if export_faces:
faces = np.array(faces, int).reshape([-1,4]) + 1
for f in faces:
fp.write(f"f {f[0]} {f[1]} {f[2]} {f[3]}\n")
def visualize_sensors(gt, save_path):
from omni.syntheticdata import visualize
# GROUNDTRUTH VISUALIZATION
# Setup a figure
fig, axes = plt.subplots(1, 3, figsize=(20, 6))
axes = axes.flat
for ax in axes:
ax.axis("off")
# RGB
axes[0].set_title("RGB")
for ax in axes[:-1]:
ax.imshow(gt["rgb"])
# DEPTH
axes[1].set_title("Depth")
depth_data = np.clip(gt["depth"], 0, 255)
axes[1].imshow(visualize.colorize_depth(depth_data.squeeze()))
# SEMSEG
axes[2].set_title("Semantic Segmentation")
semantic_seg = gt["semanticSegmentation"]
semantic_rgb = visualize.colorize_segmentation(semantic_seg)
axes[2].imshow(semantic_rgb, alpha=0.7)
# Save figure
fig.savefig(save_path)
plt.close(fig)
def save_frame(frame_name, frame_data, save_dir,
save_rgb=True, save_seg=True, save_depth=True, save_partial_pointcloud=False):
if save_rgb:
rgb = frame_data['rgb_img']
Image.fromarray(rgb).save(f"{save_dir}/rgb_{frame_name}.jpg")
if save_seg:
seg= frame_data['seg_img']
sem = np.tile(seg[:,:,np.newaxis], (1,1,3)).astype(np.uint8) * 255
Image.fromarray(sem).save(f"{save_dir}/seg_{frame_name}.jpg")
if save_depth:
depth_img = Image.fromarray((frame_data['dep_img'].squeeze() * 1000).astype(np.uint16), mode='I;16').convert(mode='I')
depth_img.save(f"{save_dir}/depth_{frame_name}.png")
def save_state(state_name, state_data, save_dir):
loc, rot, sim, vis = state_data
state_dict = {}
state_dict['loc'] = np.array(tuple(loc))
state_dict['rot'] = np.array(eval(str(rot)))
state_dict['sim'] = np.array(sim)
state_dict['vis'] = np.array(vis)
np.savez(f"{save_dir}/state_{state_name}.npz", **state_dict)
################################################################
# Interaction Utils
################################################################
def sample_pick_point(partial_point_cloud, segmentation):
im_h = segmentation.shape[0]
im_w = segmentation.shape[1]
# point cloud "image" height and width
pc_h = partial_point_cloud.shape[0]
pc_w = partial_point_cloud.shape[1]
assert im_h == pc_h and im_w == pc_w, "partial_point_cloud dimension should match with that of segmentation mask"
# minimal completion (assumption): pick a random segmented pixel and return
# its back-projected 3D point, mirroring the sampling done in attic_scene
ys, xs = np.nonzero(segmentation)
if len(ys) == 0:
return None
idx = np.random.randint(len(ys))
return partial_point_cloud[ys[idx], xs[idx], :]
def sample_spherical(npoints, ndim=3):
vec = np.random.randn(ndim, npoints)
vec /= np.linalg.norm(vec, axis=0)
return vec
def sample_direction(npoints):
phi = np.random.randn(npoints) * 2 * np.pi
theta = np.clip(np.random.normal(loc=np.pi / 4.,scale=np.pi / 12., size=npoints), np.pi / 6., np.pi / 2.)
x = np.cos(phi) * np.sin(theta)
z = np.sin(phi) * np.sin(theta)
y = np.cos(theta)
vec = np.vstack([x,y,z])
return vec
def sample_direction_zup(npoints):
phi = np.random.randn(npoints) * 2 * np.pi
theta = np.clip(np.random.normal(loc=np.pi / 4.,scale=np.pi / 12., size=npoints), np.pi / 6., np.pi / 2.)
x = np.cos(phi) * np.sin(theta)
y = np.sin(phi) * np.sin(theta)
z = np.cos(theta)
vec = np.vstack([x,y,z])
return vec
def interpolate(start_loc, end_loc, speed):
start_loc = np.array(start_loc)
end_loc = np.array(end_loc)
dist = np.linalg.norm(end_loc - start_loc)
chunks = dist // speed
return start_loc + np.outer(np.arange(chunks+1,dtype=float), (end_loc - start_loc) / chunks)
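# Worked example: interpolate((0, 0, 0), (10, 0, 0), speed=2.) gives dist=10
# and chunks=5, so it returns the 6 waypoints x = [0, 2, 4, 6, 8, 10], i.e.
# evenly spaced steps of `speed` units from start to end inclusive.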
class magic_eef(object):
def __init__(self, end_effector, stage, eef_default_loc=None, default_speed=1,
fingerL=None, fingerR=None):
self.end_effector = end_effector
self.eef_default_loc = eef_default_loc
self.default_speed = default_speed
self.stage = stage
xform = UsdGeom.Xformable(end_effector)
self.ops = xform.GetOrderedXformOps()
assert self.ops[0].GetOpType() == UsdGeom.XformOp.TypeTranslate,\
"Code is based on UsdGeom.Xformable with first op as translation"
assert self.ops[1].GetOpType() == UsdGeom.XformOp.TypeOrient,\
"Code is based on UsdGeom.Xformable with second op as orientation"
self.attachmentPath = None
self.set_translation(eef_default_loc)
self.fingerL=fingerL
if fingerL is not None:
xform = UsdGeom.Xformable(fingerL)
self.fingerL_ops = xform.GetOrderedXformOps()[0]
self.fingerL_ops.Set((-5,0,20))
self.fingerR=fingerR
if fingerR is not None:
xform = UsdGeom.Xformable(fingerR)
self.fingerR_ops = xform.GetOrderedXformOps()[0]
self.fingerL_ops.Set((5,0,20))
def get_translation(self):
return self.ops[0].Get()
def set_translation(self, loc):
self.ops[0].Set(loc)
def reset_translation(self):
self.set_translation(self.eef_default_loc)
def get_orientation(self):
return self.ops[1].Get()
def set_orientation(self, rot):
self.ops[1].Set(rot)
def grasp(self, target_object):
# enable collision
self.end_effector.GetAttribute("physics:collisionEnabled").Set(True)
# create magic grasp
self.attachmentPath = target_object.GetPath().AppendChild("rigidAttachment_0")
omni.kit.commands.execute(
"AddSoftBodyRigidAttachmentCommand",
target_attachment_path=self.attachmentPath,
softbody_path=target_object.GetPath(),
rigidbody_path=self.end_effector.GetPath(),
)
attachmentPrim = self.stage.GetPrimAtPath(self.attachmentPath)
assert attachmentPrim
assert attachmentPrim.GetAttribute("physxEnableHaloParticleFiltering").Set(True)
assert attachmentPrim.GetAttribute("physxEnableVolumeParticleAttachments").Set(True)
assert attachmentPrim.GetAttribute("physxEnableSurfaceTetraAttachments").Set(True)
omni.physx.get_physx_interface().release_physics_objects()
self.fingerL_ops.Set((-5,0,20))
self.fingerR_ops.Set((5,0,20))
def ungrasp(self):
assert self.attachmentPath is not None, "nothing is grasped! (there is no attachment registered)"
# release magic grasp
omni.kit.commands.execute(
"DeletePrimsCommand",
paths=[self.attachmentPath]
)
self.end_effector.GetAttribute("physics:collisionEnabled").Set(False)
omni.physx.get_physx_interface().release_physics_objects()
self.attachmentPath = None
self.fingerL_ops.Set((-80,0,20))
self.fingerR_ops.Set((80,0,20))
#self.reset_translation()
def plan_trajectory(self, start_loc, end_loc, speed=None):
return interpolate(start_loc, end_loc, self.default_speed if speed is None else speed)
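# Pick-and-place sketch (illustrative; `gripper` built as in
# attic_scene._setup_robots, `plush` the deformable prim, `kit` the app):
#   for wp in gripper.plan_trajectory(gripper.get_translation(), grasp_loc):
#       gripper.set_translation(tuple(wp)); kit.update()
#   gripper.grasp(plush)
#   for wp in gripper.plan_trajectory(grasp_loc, target_loc):
#       gripper.set_translation(tuple(wp)); kit.update()
#   gripper.ungrasp()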
################################
# Random utils
################################
def get_camera_name(viewport):
stage = omni.usd.get_context().get_stage()
return stage.GetPrimAtPath(viewport.get_active_camera()).GetName()
def rpy2quat(roll,pitch,yaw):
roll*=0.5
pitch*=0.5
yaw*=0.5
cr = math.cos(roll)
cp = math.cos(pitch)
cy = math.cos(yaw)
sr = math.sin(roll)
sp = math.sin(pitch)
sy = math.sin(yaw)
cpcy = cp * cy
spsy = sp * sy
spcy = sp * cy
cpsy = cp * sy
qx = (sr * cpcy - cr * spsy)
qy = (cr * spcy + sr * cpsy)
qz = (cr * cpsy - sr * spcy)
qw = cr * cpcy + sr * spsy
return Gf.Quatf(qw,qx,qy,qz)
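# Note: the trigonometry above treats roll/pitch/yaw as radians, and the
# returned component order is (w, x, y, z); rpy2quat(0, 0, 0) yields the
# identity quaternion Gf.Quatf(1, 0, 0, 0).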
################################
# Scene randomization utils
################################
def is_collider(prim):
try:
return prim.GetAttribute("physics:collisionEnabled").Get()
except:
return False
def find_collider(prim):
#from pxr import UsdPhysics
primRange = iter(Usd.PrimRange(prim))
extent, transform = None, None
for p in primRange:
#if p.HasAPI(UsdPhysics.CollisionAPI):
if is_collider(p):
extent = p.GetAttribute("extent").Get()
if extent is None:
# this means that the object is a cube
extent = np.array([[-50,-50,-50],[50,50,50]])
transform = omni.usd.get_world_transform_matrix(p, Usd.TimeCode.Default())
primRange.PruneChildren()
break
return np.array(extent), np.array(transform)
def find_immediate_children(prim):
primRange = Usd.PrimRange(prim)
primPath = prim.GetPath()
immediate_children = []
for p in primRange:
if p.GetPath().GetParentPath() == primPath:
immediate_children.append(p)
return immediate_children
def extent_to_cube(extent):
min_x,min_y,min_z = extent[0]
max_x,max_y,max_z = extent[1]
verts = np.array([
(max_x,max_y,max_z),
(max_x,max_y,min_z),
(max_x,min_y,max_z),
(max_x,min_y,min_z),
(min_x,max_y,max_z),
(min_x,max_y,min_z),
(min_x,min_y,max_z),
(min_x,min_y,min_z),])
faces = np.array([
(1,5,7,3),
(4,3,7,8),
(8,7,5,6),
(6,2,4,8),
(2,1,3,4),
(6,5,1,2),])
return verts, faces
def transform_verts(verts, transform):
verts_app = np.concatenate([verts,np.ones((verts.shape[0], 1))], axis=-1)
return (verts_app @ transform)[:,:-1]
def export_quad_obj(verts, faces, export_path):
with open(export_path, 'w') as fp:
for p in verts:
fp.write(f"v {p[0]:.3f} {p[1]:.3f} {p[2]:.3f}\n")
for f in faces:
fp.write(f"f {f[0]} {f[1]} {f[2]} {f[3]}\n")
def standardize_bbox(bbox):
return np.array([bbox.min(axis=0),bbox.max(axis=0)])
def get_bbox_translation_range(bbox, scene_range):
# bbox size
size_x,size_y = bbox[1] - bbox[0]
center_range = scene_range + np.array([[size_x, size_y],[-size_x,-size_y]]) / 2
center = np.mean(bbox, axis=0)
return center_range - center
def sample_bbox_translation(bbox, scene_range):
translation_range = get_bbox_translation_range(bbox, scene_range)
sample = np.random.rand(2)
return translation_range[0] + sample * (translation_range[1] - translation_range[0])
def get_canvas(scene_range):
scene_size = scene_range[1] - scene_range[0]
scene_size = ( scene_size * 1.1 ).astype(int)
return np.zeros(scene_size)
def fill_canvas(canvas, scene_range, bbox,val=1):
canvas_center = np.array(canvas.shape) / 2
cb = (bbox - np.mean(scene_range, axis=0) + canvas_center).astype(int)
if cb[0,0] < 0 or cb[0,1] < 0:
return
h,w = canvas.shape
if cb[1,0] >= h or cb[1,1] >= w:
return
canvas[cb[0,0]:cb[1,0], cb[0,1]:cb[1,1]] = val
def get_occupancy_value(canvas, scene_range, pts):
canvas_center = np.array(canvas.shape) / 2
pts = (pts - np.mean(scene_range, axis=0) + canvas_center).astype(int)
return canvas[pts[:,0], pts[:,1]]
def overlaps_with_current(canvas, scene_range, bbox,val=0):
canvas_center = np.array(canvas.shape) / 2
cb = (bbox - np.mean(scene_range, axis=0) + canvas_center).astype(int)
return (canvas[cb[0,0]:cb[1,0], cb[0,1]:cb[1,1]] != val).any()
def pad_to_square(bbox):
size_x,size_y = (bbox[1] - bbox[0]) / 2.
center = np.mean(bbox, axis=0)
length = max(size_x,size_y)
return np.stack([center-length,center+length])
def scale(bbox,factor=1.1):
size_x,size_y = (bbox[1] - bbox[0]) / 2. *factor
center = np.mean(bbox, axis=0)
return np.stack([center-[size_x,size_y],center+[size_x,size_y]])
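# Typical use of the canvas helpers above (a sketch mirroring
# attic_scene._randomize_collection; `bboxes` come from find_collider +
# standardize_bbox, and `scene_range` is a 2x2 [min; max] array):
#   canvas = get_canvas(scene_range)
#   for b in bboxes:
#       t = sample_bbox_translation(b, scene_range)
#       tb = scale(pad_to_square(b + t))
#       if not overlaps_with_current(canvas, scene_range, tb):
#           fill_canvas(canvas, scene_range, tb)  # accept the placement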
|
NVlabs/ACID/PlushSim/scripts/writer.py | #!/usr/bin/env python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Helper class for writing groundtruth data offline.
"""
import atexit
import colorsys
import copy  # needed below by create_output_folders for deepcopy
import queue
import omni
import os
import threading
import numpy as np
from PIL import Image, ImageDraw
class DataWriter:
def __init__(self, data_dir, num_worker_threads, max_queue_size=500, sensor_settings=None):
from omni.isaac.synthetic_utils import visualization as vis
self.vis = vis
atexit.register(self.stop_threads)
self.data_dir = data_dir
# Threading for multiple scenes
self.num_worker_threads = num_worker_threads
# Initialize queue with a specified size
self.q = queue.Queue(max_queue_size)
self.threads = []
self._viewport = omni.kit.viewport.get_viewport_interface()
self.create_output_folders(sensor_settings)
def start_threads(self):
"""Start worker threads."""
for _ in range(self.num_worker_threads):
t = threading.Thread(target=self.worker, daemon=True)
t.start()
self.threads.append(t)
def stop_threads(self):
"""Waits for all tasks to be completed before stopping worker threads."""
print(f"Finish writing data...")
# Block until all tasks are done
self.q.join()
# Stop workers
for _ in range(self.num_worker_threads):
self.q.put(None)
for t in self.threads:
t.join()
print(f"Done.")
def worker(self):
"""Processes task from queue. Each tasks contains groundtruth data and metadata which is used to transform the output and write it to disk."""
while True:
groundtruth = self.q.get()
if groundtruth is None:
break
filename = groundtruth["METADATA"]["image_id"]
viewport_name = groundtruth["METADATA"]["viewport_name"]
for gt_type, data in groundtruth["DATA"].items():
if gt_type == "RGB":
self.save_image(viewport_name, gt_type, data, filename)
elif gt_type == "DEPTH":
if groundtruth["METADATA"]["DEPTH"]["NPY"]:
self.depth_folder = self.data_dir + "/" + str(viewport_name) + "/depth/"
np.save(self.depth_folder + filename + ".npy", data)
if groundtruth["METADATA"]["DEPTH"]["COLORIZE"]:
self.save_image(viewport_name, gt_type, data, filename)
elif gt_type == "INSTANCE":
self.save_segmentation(
viewport_name,
gt_type,
data,
filename,
groundtruth["METADATA"]["INSTANCE"]["WIDTH"],
groundtruth["METADATA"]["INSTANCE"]["HEIGHT"],
groundtruth["METADATA"]["INSTANCE"]["COLORIZE"],
groundtruth["METADATA"]["INSTANCE"]["NPY"],
)
elif gt_type == "SEMANTIC":
self.save_segmentation(
viewport_name,
gt_type,
data,
filename,
groundtruth["METADATA"]["SEMANTIC"]["WIDTH"],
groundtruth["METADATA"]["SEMANTIC"]["HEIGHT"],
groundtruth["METADATA"]["SEMANTIC"]["COLORIZE"],
groundtruth["METADATA"]["SEMANTIC"]["NPY"],
)
elif gt_type in ["BBOX2DTIGHT", "BBOX2DLOOSE"]:
self.save_bbox(
viewport_name,
gt_type,
data,
filename,
groundtruth["METADATA"][gt_type]["COLORIZE"],
groundtruth["DATA"]["RGB"],
groundtruth["METADATA"][gt_type]["NPY"],
)
elif gt_type == "CAMERA":
self.camera_folder = self.data_dir + "/" + str(viewport_name) + "/camera/"
np.save(self.camera_folder + filename + ".npy", data)
elif gt_type == "POSES":
self.poses_folder = self.data_dir + "/" + str(viewport_name) + "/poses/"
np.save(self.poses_folder + filename + ".npy", data)
else:
raise NotImplementedError
self.q.task_done()
def save_segmentation(
self, viewport_name, data_type, data, filename, width=1280, height=720, display_rgb=True, save_npy=True
):
self.instance_folder = self.data_dir + "/" + str(viewport_name) + "/instance/"
self.semantic_folder = self.data_dir + "/" + str(viewport_name) + "/semantic/"
# Save ground truth data locally as npy
if data_type == "INSTANCE" and save_npy:
np.save(self.instance_folder + filename + ".npy", data)
if data_type == "SEMANTIC" and save_npy:
np.save(self.semantic_folder + filename + ".npy", data)
if display_rgb:
image_data = np.frombuffer(data, dtype=np.uint8).reshape(*data.shape, -1)
num_colors = 50 if data_type == "SEMANTIC" else None
color_image = self.vis.colorize_segmentation(image_data, width, height, 3, num_colors)
# color_image = visualize.colorize_instance(image_data)
color_image_rgb = Image.fromarray(color_image, "RGB")
if data_type == "INSTANCE":
color_image_rgb.save(f"{self.instance_folder}/{filename}.png")
if data_type == "SEMANTIC":
color_image_rgb.save(f"{self.semantic_folder}/{filename}.png")
def save_image(self, viewport_name, img_type, image_data, filename):
self.rgb_folder = self.data_dir + "/" + str(viewport_name) + "/rgb/"
self.depth_folder = self.data_dir + "/" + str(viewport_name) + "/depth/"
if img_type == "RGB":
# Save ground truth data locally as png
rgb_img = Image.fromarray(image_data, "RGBA")
rgb_img.save(f"{self.rgb_folder}/{filename}.png")
elif img_type == "DEPTH":
# Convert linear depth to inverse depth for better visualization
image_data = image_data * 100
image_data = np.reciprocal(image_data)
# Save ground truth data locally as png
image_data[image_data == 0.0] = 1e-5
image_data = np.clip(image_data, 0, 255)
image_data -= np.min(image_data)
if np.max(image_data) > 0:
image_data /= np.max(image_data)
depth_img = Image.fromarray((image_data * 255.0).astype(np.uint8))
depth_img.save(f"{self.depth_folder}/{filename}.png")
def save_bbox(self, viewport_name, data_type, data, filename, display_rgb=True, rgb_data=None, save_npy=True):
self.bbox_2d_tight_folder = self.data_dir + "/" + str(viewport_name) + "/bbox_2d_tight/"
self.bbox_2d_loose_folder = self.data_dir + "/" + str(viewport_name) + "/bbox_2d_loose/"
# Save ground truth data locally as npy
if data_type == "BBOX2DTIGHT" and save_npy:
np.save(self.bbox_2d_tight_folder + filename + ".npy", data)
if data_type == "BBOX2DLOOSE" and save_npy:
np.save(self.bbox_2d_loose_folder + filename + ".npy", data)
if display_rgb and rgb_data is not None:
color_image = self.vis.colorize_bboxes(data, rgb_data)
color_image_rgb = Image.fromarray(color_image, "RGBA")
if data_type == "BBOX2DTIGHT":
color_image_rgb.save(f"{self.bbox_2d_tight_folder}/{filename}.png")
if data_type == "BBOX2DLOOSE":
color_image_rgb.save(f"{self.bbox_2d_loose_folder}/{filename}.png")
def create_output_folders(self, sensor_settings=None):
"""Checks if the sensor output folder corresponding to each viewport is created. If not, it creates them."""
if not os.path.exists(self.data_dir):
os.mkdir(self.data_dir)
if sensor_settings is None:
sensor_settings = dict()
viewports = self._viewport.get_instance_list()
viewport_names = [self._viewport.get_viewport_window_name(vp) for vp in viewports]
sensor_settings_viewport = {
"rgb": {"enabled": True},
"depth": {"enabled": True, "colorize": True, "npy": True},
"instance": {"enabled": True, "colorize": True, "npy": True},
"semantic": {"enabled": True, "colorize": True, "npy": True},
"bbox_2d_tight": {"enabled": True, "colorize": True, "npy": True},
"bbox_2d_loose": {"enabled": True, "colorize": True, "npy": True},
"camera": {"enabled": True, "npy": True},
"poses": {"enabled": True, "npy": True},
}
for name in viewport_names:
sensor_settings[name] = copy.deepcopy(sensor_settings_viewport)
for viewport_name in sensor_settings:
viewport_folder = self.data_dir + "/" + str(viewport_name)
if not os.path.exists(viewport_folder):
os.mkdir(viewport_folder)
for sensor_name in sensor_settings[viewport_name]:
if sensor_settings[viewport_name][sensor_name]["enabled"]:
sensor_folder = self.data_dir + "/" + str(viewport_name) + "/" + str(sensor_name)
if not os.path.exists(sensor_folder):
os.mkdir(sensor_folder)
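# Usage sketch (illustrative; the dict layout follows worker() above, and
# rgb_array/depth_array stand in for arrays produced elsewhere):
#   writer = DataWriter("/tmp/out", num_worker_threads=4)
#   writer.start_threads()
#   writer.q.put({
#       "METADATA": {"image_id": "000001", "viewport_name": "Viewport",
#                    "DEPTH": {"NPY": True, "COLORIZE": False}},
#       "DATA": {"RGB": rgb_array, "DEPTH": depth_array},
#   })  # stop_threads() is registered via atexit and drains the queue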
|
NVlabs/ACID/ACID/environment.yaml | name: acid_train
channels:
- conda-forge
- pytorch
- defaults
dependencies:
- cython=0.29.2
- imageio=2.4.1
- numpy=1.15.4
- numpy-base=1.15.4
- matplotlib=3.0.3
- matplotlib-base=3.0.3
- pandas=0.23.4
- pillow=5.3.0
- pyembree=0.1.4
- pytest=4.0.2
- python=3.7.10
- pytorch=1.4.0
- pyyaml=3.13
- scikit-image=0.14.1
- scipy=1.5.2
- tensorboardx=1.4
- torchvision=0.2.1
- tqdm=4.28.1
- trimesh=2.37.7
- pip
- pip:
- scikit-learn==0.24.2
- h5py==2.9.0
- plyfile==0.7
- polyscope==1.2.0
|
NVlabs/ACID/ACID/setup.py | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
import numpy
# Get the numpy include directory.
numpy_include_dir = numpy.get_include()
# Extensions
# mcubes (marching cubes algorithm)
mcubes_module = Extension(
'src.utils.libmcubes.mcubes',
sources=[
'src/utils/libmcubes/mcubes.pyx',
'src/utils/libmcubes/pywrapper.cpp',
'src/utils/libmcubes/marchingcubes.cpp'
],
language='c++',
extra_compile_args=['-std=c++11'],
include_dirs=[numpy_include_dir]
)
# mise (efficient mesh extraction)
mise_module = Extension(
'src.utils.libmise.mise',
sources=[
'src/utils/libmise/mise.pyx'
],
)
# simplify (efficient mesh simplification)
simplify_mesh_module = Extension(
'src.utils.libsimplify.simplify_mesh',
sources=[
'src/utils/libsimplify/simplify_mesh.pyx'
],
include_dirs=[numpy_include_dir]
)
# Gather all extension modules
ext_modules = [
mcubes_module,
mise_module,
simplify_mesh_module,
]
setup(
ext_modules=cythonize(ext_modules),
cmdclass={
'build_ext': BuildExtension
}
)
|
NVlabs/ACID/ACID/plush_train.py | import torch
import torch.optim as optim
from tensorboardX import SummaryWriter
import matplotlib; matplotlib.use('Agg')
import numpy as np
import os
import argparse
import time, datetime
from src import config, data
from src.checkpoints import CheckpointIO
from collections import defaultdict
import shutil
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from src.utils import common_util
import matplotlib.pyplot as plt
from PIL import Image
# Arguments
parser = argparse.ArgumentParser(
description='Train a Plush Env dynamics model.'
)
parser.add_argument('config', type=str, help='Path to config file.')
parser.add_argument('--no-cuda', action='store_true', help='Do not use cuda.')
parser.add_argument('--exit-after', type=int, default=-1,
help='Checkpoint and exit after the specified number of seconds '
'with exit code 3.')
parser.add_argument('--debug', action='store_true', help='debugging')
parser.add_argument('--eval_only', action='store_true', help='run eval only')
args = parser.parse_args()
cfg = config.load_config(args.config, 'configs/default.yaml')
is_cuda = (torch.cuda.is_available() and not args.no_cuda)
device = torch.device("cuda" if is_cuda else "cpu")
# Set t0
t0 = time.time()
# Shorthands
out_dir = cfg['training']['out_dir']
if args.debug:
cfg['training']['batch_size'] = 2
cfg['training']['vis_n_outputs'] = 1
cfg['training']['print_every'] = 1
cfg['training']['backup_every'] = 1
cfg['training']['validate_every'] = 1
cfg['training']['visualize_every'] = 1
cfg['training']['checkpoint_every'] = 1
cfg['training']['visualize_total'] = 1
batch_size = cfg['training']['batch_size']
backup_every = cfg['training']['backup_every']
vis_n_outputs = cfg['generation']['vis_n_outputs']
exit_after = args.exit_after
model_selection_metric = cfg['training']['model_selection_metric']
if cfg['training']['model_selection_mode'] == 'maximize':
model_selection_sign = 1
elif cfg['training']['model_selection_mode'] == 'minimize':
model_selection_sign = -1
else:
raise ValueError('model_selection_mode must be '
'either maximize or minimize.')
# Output directory
if not os.path.exists(out_dir):
os.makedirs(out_dir)
shutil.copyfile(args.config, os.path.join(out_dir, 'config.yaml'))
# Dataset
train_loader = data.core.get_plush_loader(cfg, cfg['model']['type'], split='train')
val_loader = data.core.get_plush_loader(cfg, cfg['model']['type'], split='test')
# Model
model = config.get_model(cfg, device=device)
# Generator
generator = config.get_generator(model, cfg, device=device)
# Initialize training
optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-4)
trainer = config.get_trainer(model, optimizer, cfg, device=device)
checkpoint_io = CheckpointIO(out_dir, model=model, optimizer=optimizer)
try:
load_dict = checkpoint_io.load('model_best.pt')
except FileExistsError:
load_dict = dict()
epoch_it = load_dict.get('epoch_it', 0)
it = load_dict.get('it', 0)
metric_val_best = load_dict.get(
'loss_val_best', -model_selection_sign * np.inf)
if metric_val_best == np.inf or metric_val_best == -np.inf:
metric_val_best = -model_selection_sign * np.inf
print('Current best validation metric (%s): %.8f'
% (model_selection_metric, metric_val_best))
logger = SummaryWriter(os.path.join(out_dir, 'logs'))
# Shorthands
print_every = cfg['training']['print_every']
checkpoint_every = cfg['training']['checkpoint_every']
validate_every = cfg['training']['validate_every']
visualize_every = cfg['training']['visualize_every']
# Print model
nparameters = sum(p.numel() for p in model.parameters())
print('Total number of parameters: %d' % nparameters)
print('output path: ', cfg['training']['out_dir'])
# For visualizations
data_vis_list = []
if cfg['model']['type'] == 'geom':
vis_dataset = data.core.get_geom_dataset(cfg, split='vis')
elif cfg['model']['type'] == 'combined':
vis_dataset = data.core.get_combined_dataset(cfg, split='vis')
# Build a data dictionary for visualization
np.random.seed(0)
data_idxes = np.random.randint(len(vis_dataset), size=cfg['training']['visualize_total'])
for i, id in enumerate(data_idxes):
data_vis = data.core.collate_pair_fn([vis_dataset[id]])
data_vis_list.append({'it': i, 'data': data_vis})
if args.eval_only:
eval_dict, figs = trainer.evaluate(val_loader)
metric_val = eval_dict[model_selection_metric]
for k, v in eval_dict.items():
print(f"metric {k}: {v}")
print('Validation metric (%s): %.4f'
% (model_selection_metric, metric_val))
for k,v in figs.items():
fig_path = os.path.join(out_dir, 'vis', f"{k}_eval_best.png")
v.savefig(fig_path)
for data_vis in data_vis_list:
out = generator.generate_mesh(data_vis['data'])
# Get statistics
try:
mesh, stats_dict = out
except TypeError:
mesh, stats_dict = out, {}
mesh.export(os.path.join(out_dir, 'vis', f"best_{data_vis['it']}.off"))
out2 = generator.generate_pointcloud(data_vis['data'])
for i,pcloud in enumerate(out2):
ipath = os.path.join(out_dir, 'vis', f"best_{data_vis['it']}_{i}.obj")
common_util.write_pointcoud_as_obj(ipath, pcloud)
pcloud_dict = [{"title":'source'if i == 0 else 'target',
"pts": p[:,:3],
"col": None if p.shape[1] == 3 else p[:,3:]
} for i,p in enumerate(out2)]
fig = common_util.side_by_side_point_clouds(pcloud_dict)
width, height = fig.get_size_inches() * fig.get_dpi()
canvas = FigureCanvas(fig)
canvas.draw()
img_path = os.path.join(out_dir, 'vis', f"best_{data_vis['it']}.png")
Image.fromarray(
np.frombuffer(
canvas.tostring_rgb(),
dtype='uint8').reshape(int(height), int(width), 3)).save(
img_path
)
plt.close(fig)
quit()
while True:
epoch_it += 1
for batch in train_loader:
it += 1
losses = trainer.train_step(batch, it)
for k,v in losses.items():
logger.add_scalar(f'train/{k}_loss', v, it)
# Print output
if (it % print_every) == 0:
t = datetime.datetime.now()
print_str = f"[Epoch {epoch_it:04d}] it={it:04d}, time: {time.time()-t0:.3f}, "
print_str += f"{t.hour:02d}:{t.minute:02d}, "
for k,v in losses.items():
print_str += f"{k}:{v:.4f}, "
print(print_str)
# Save checkpoint
if (checkpoint_every > 0 and (it % checkpoint_every) == 0):
print('Saving checkpoint')
checkpoint_io.save('model.pt', epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
# Backup if necessary
if (backup_every > 0 and (it % backup_every) == 0):
print('Backup checkpoint')
checkpoint_io.save('model_%d.pt' % it, epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
# Run validation
if validate_every > 0 and (it % validate_every) == 0:
print('Running Validation')
eval_dict, figs = trainer.evaluate(val_loader)
for k,v in figs.items():
fig_path = os.path.join(out_dir, 'vis', f"{k}_{it}.png")
v.savefig(fig_path)
logger.add_figure(k, v, it)
metric_val = eval_dict[model_selection_metric]
print('Validation metric (%s): %.4f'
% (model_selection_metric, metric_val))
for k, v in eval_dict.items():
print(f"metric {k}: {v}")
logger.add_scalar('val/%s' % k, v, it)
if model_selection_sign * (metric_val - metric_val_best) > 0:
metric_val_best = metric_val
print('New best model (loss %.4f)' % metric_val_best)
checkpoint_io.save('model_best.pt', epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
# Visualize output
if visualize_every > 0 and (it % visualize_every) == 0:
print('Visualizing')
renders = []
for data_vis in data_vis_list:
out = generator.generate_mesh(data_vis['data'])
# Get statistics
try:
mesh, stats_dict = out
except TypeError:
mesh, stats_dict = out, {}
mesh.export(os.path.join(out_dir, 'vis', '{}_{}.off'.format(it, data_vis['it'])))
out2 = generator.generate_pointcloud(data_vis['data'])
for i,pcloud in enumerate(out2):
ipath = os.path.join(out_dir, 'vis', f"{it}_{data_vis['it']}_{i}.obj")
common_util.write_pointcoud_as_obj(ipath, pcloud)
name_dict = ['source', 'target', 'source_rollout', 'target_rollout']
pcloud_dict = [{"title":name_dict[i],
"pts": p[:,:3],
"col": None if p.shape[1] == 3 else p[:,3:]
} for i,p in enumerate(out2)]
fig = common_util.side_by_side_point_clouds(pcloud_dict)
width, height = fig.get_size_inches() * fig.get_dpi()
canvas = FigureCanvas(fig)
canvas.draw()
img_path = os.path.join(out_dir, 'vis', f"{it}_{data_vis['it']}.png")
Image.fromarray(
np.frombuffer(
canvas.tostring_rgb(),
dtype='uint8').reshape(int(height), int(width), 3)).save(
img_path
)
plt.close(fig)
# Exit if necessary
if exit_after > 0 and (time.time() - t0) >= exit_after:
print('Time limit reached. Exiting.')
checkpoint_io.save('model.pt', epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
exit(3)
|
NVlabs/ACID/ACID/README.md | [![NVIDIA Source Code License](https://img.shields.io/badge/license-NSCL-blue.svg)](https://github.com/NVlabs/ACID/blob/master/LICENSE)
![Python 3.7](https://img.shields.io/badge/python-3.7-green.svg)
# ACID model
<div style="text-align: center">
<img src="../_media/model_figure.png" width="600"/>
</div>
## Prerequisites
We use anaconda to manage necessary packages. You can create an anaconda environment called `acid_train` using
```bash
conda env create -f environment.yaml
conda activate acid_train
pip install torch-scatter==2.0.4 -f https://pytorch-geometric.com/whl/torch-1.4.0+cu101.html
```
Next, we need to compile the extension modules used for mesh utilities, which come from [Convolutional Occupancy Network](https://github.com/autonomousvision/convolutional_occupancy_networks).
You can do this via
```
python setup.py build_ext --inplace
```
## Get Raw Manipulation Data
You can obtain our pre-generated manipulation trajectories from [PlushSim](../PlushSim/) in this [Google Drive](https://drive.google.com/drive/folders/1wOIk58e3wCfgOeYFBC1caYP2KAoFijbW?usp=sharing) directory. The manipulation trajectories are broken into 10 GB chunks. We recommend using [`gdown`](https://github.com/wkentaro/gdown) to download them.
After downloading, please run the following commands to decompress the data:
```
cat data_plush.zip.part-* > data_plush.zip
unzip data_plush.zip
```
You should have the following folder structure:
```
ACID/
data_plush/
metadata/
split1/
...
split2/
...
split3/
...
split1/
...
split2/
...
split3/
...
```
### Generating Training Data
To generate input-output pairs for ACID training, you need to run the following scripts to generate the data:
```
cd preprocess
python gen_data_flow_plush.py
python gen_data_flow_splits.py
python gen_data_contrastive_pairs_flow.py
```
This should create `train_data` directory inside this folder, with the following structure:
```
ACID/
train_data/
flow/
split1/
split2/
split3/
train.pkl
test.pkl
pair/
split1/
split2/
split3/
```
If you wish to generate the data at another location, you can pass in different flags. Check out each preprocess script for details.
## Training
Finally, to train the ACID model from scratch, run:
```
python plush_train.py configs/plush_dyn_geodesics.yaml
```
For available training options, please take a look at `configs/default.yaml` and `configs/plush_dyn_geodesics.yaml`.
### Pretrained Weights
You can download pretrained weights on [Google Drive](https://drive.google.com/file/d/15ClJpMx8LlgPHXp1EeCP3Z4kD5h5bDKl/view?usp=sharing), please save `model_best.pt` to `result/geodesics/`.
## License
Please check the [LICENSE](../LICENSE) file. ACID may be used non-commercially, meaning for research or evaluation purposes only. For business inquiries, please contact [email protected].
If you find our code or paper useful, please consider citing
```bibtex
@article{shen2022acid,
title={ACID: Action-Conditional Implicit Visual Dynamics for Deformable Object Manipulation},
author={Shen, Bokui and Jiang, Zhenyu and Choy, Christopher and J. Guibas, Leonidas and Savarese, Silvio and Anandkumar, Anima and Zhu, Yuke},
journal={Robotics: Science and Systems (RSS)},
year={2022}
}
``` |
NVlabs/ACID/ACID/src/training.py | import numpy as np
from collections import defaultdict
from tqdm import tqdm
class BaseTrainer(object):
''' Base trainer class.
'''
def evaluate(self, val_loader):
''' Performs an evaluation.
Args:
val_loader (dataloader): pytorch dataloader
'''
eval_list = defaultdict(list)
for data in tqdm(val_loader):
eval_step_dict = self.eval_step(data)
for k, v in eval_step_dict.items():
eval_list[k].append(v)
eval_dict = {k: np.mean(v) for k, v in eval_list.items()}
return eval_dict
def train_step(self, *args, **kwargs):
''' Performs a training step.
'''
raise NotImplementedError
def eval_step(self, *args, **kwargs):
''' Performs an evaluation step.
'''
raise NotImplementedError
def visualize(self, *args, **kwargs):
''' Performs visualization.
'''
raise NotImplementedError
|
NVlabs/ACID/ACID/src/common.py | # import multiprocessing
import torch
import numpy as np
import math
import numpy as np
def compute_iou(occ1, occ2):
''' Computes the Intersection over Union (IoU) value for two sets of
occupancy values.
Args:
occ1 (tensor): first set of occupancy values
occ2 (tensor): second set of occupancy values
'''
occ1 = np.asarray(occ1)
occ2 = np.asarray(occ2)
# Put all data in second dimension
# Also works for 1-dimensional data
if occ1.ndim >= 2:
occ1 = occ1.reshape(occ1.shape[0], -1)
if occ2.ndim >= 2:
occ2 = occ2.reshape(occ2.shape[0], -1)
# Convert to boolean values
occ1 = (occ1 >= 0.5)
occ2 = (occ2 >= 0.5)
# Compute IOU
area_union = (occ1 | occ2).astype(np.float32).sum(axis=-1)
area_intersect = (occ1 & occ2).astype(np.float32).sum(axis=-1)
iou = (area_intersect / area_union)
return iou
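# Worked example: compute_iou([1., 1., 0., 0.], [1., 0., 1., 0.]) == 1/3,
# since one occupied cell lies in the intersection and three in the union.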
def chamfer_distance(points1, points2, give_id=False):
''' Returns the chamfer distance for the sets of points.
Args:
points1 (numpy array): first point set
points2 (numpy array): second point set
give_id (bool): whether to return the IDs of nearest points
'''
return chamfer_distance_naive(points1, points2)
def chamfer_distance_naive(points1, points2):
''' Naive implementation of the Chamfer distance.
Args:
points1 (numpy array): first point set
points2 (numpy array): second point set
'''
assert(points1.size() == points2.size())
batch_size, T, _ = points1.size()
points1 = points1.view(batch_size, T, 1, 3)
points2 = points2.view(batch_size, 1, T, 3)
distances = (points1 - points2).pow(2).sum(-1)
chamfer1 = distances.min(dim=1)[0].mean(dim=1)
chamfer2 = distances.min(dim=2)[0].mean(dim=1)
chamfer = chamfer1 + chamfer2
return chamfer
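# Shape sketch: for two batches of T points each, e.g.
#   p1 = torch.rand(4, 2048, 3); p2 = torch.rand(4, 2048, 3)
#   cd = chamfer_distance(p1, p2)  # tensor of shape (4,), one value per item
# the naive pairwise distance matrix is (B, T, T), so memory grows as T^2.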
def make_3d_grid(bb_min, bb_max, shape):
''' Makes a 3D grid.
Args:
bb_min (tuple): bounding box minimum
bb_max (tuple): bounding box maximum
shape (tuple): output shape
'''
size = shape[0] * shape[1] * shape[2]
pxs = torch.linspace(bb_min[0], bb_max[0], shape[0])
pys = torch.linspace(bb_min[1], bb_max[1], shape[1])
pzs = torch.linspace(bb_min[2], bb_max[2], shape[2])
pxs = pxs.view(-1, 1, 1).expand(*shape).contiguous().view(size)
pys = pys.view(1, -1, 1).expand(*shape).contiguous().view(size)
pzs = pzs.view(1, 1, -1).expand(*shape).contiguous().view(size)
p = torch.stack([pxs, pys, pzs], dim=1)
return p
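# Illustrative usage: a 32^3 grid of query points covering [-0.5, 0.5]^3,
# flattened to shape (32**3, 3) with x varying slowest and z fastest:
#   p = make_3d_grid((-0.5,)*3, (0.5,)*3, (32,)*3)   # -> shape (32768, 3)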
def transform_points(points, transform):
''' Transforms points with regard to passed camera information.
Args:
points (tensor): points tensor
transform (tensor): transformation matrices
'''
assert(points.size(2) == 3)
assert(transform.size(1) == 3)
assert(points.size(0) == transform.size(0))
if transform.size(2) == 4:
R = transform[:, :, :3]
t = transform[:, :, 3:]
points_out = points @ R.transpose(1, 2) + t.transpose(1, 2)
elif transform.size(2) == 3:
K = transform
points_out = points @ K.transpose(1, 2)
return points_out
def b_inv(b_mat):
''' Performs batch matrix inversion.
Arguments:
b_mat: the batch of matrices that should be inverted
'''
eye = b_mat.new_ones(b_mat.size(-1)).diag().expand_as(b_mat)
    b_inv = torch.linalg.solve(b_mat, eye)  # torch.gesv was removed; linalg.solve gives the same batched inverse
return b_inv
def project_to_camera(points, transform):
''' Projects points to the camera plane.
Args:
points (tensor): points tensor
transform (tensor): transformation matrices
'''
p_camera = transform_points(points, transform)
p_camera = p_camera[..., :2] / p_camera[..., 2:]
return p_camera
def fix_Rt_camera(Rt, loc, scale):
''' Fixes Rt camera matrix.
Args:
Rt (tensor): Rt camera matrix
loc (tensor): location
scale (float): scale
'''
# Rt is B x 3 x 4
# loc is B x 3 and scale is B
batch_size = Rt.size(0)
R = Rt[:, :, :3]
t = Rt[:, :, 3:]
scale = scale.view(batch_size, 1, 1)
R_new = R * scale
t_new = t + R @ loc.unsqueeze(2)
Rt_new = torch.cat([R_new, t_new], dim=2)
assert(Rt_new.size() == (batch_size, 3, 4))
return Rt_new
def normalize_coordinate(p, padding=0.1, plane='xz'):
''' Normalize coordinate to [0, 1] for unit cube experiments
Args:
p (tensor): point
        padding (float): conventional padding parameter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
plane (str): plane feature type, ['xz', 'xy', 'yz']
'''
if plane == 'xz':
xy = p[:, :, [0, 2]]
    elif plane == 'xy':
xy = p[:, :, [0, 1]]
else:
xy = p[:, :, [1, 2]]
xy_new = xy / (1 + padding + 10e-6) # (-0.5, 0.5)
xy_new = xy_new + 0.5 # range (0, 1)
    # if there are outliers out of the range
if xy_new.max() >= 1:
xy_new[xy_new >= 1] = 1 - 10e-6
if xy_new.min() < 0:
xy_new[xy_new < 0] = 0.0
return xy_new
def normalize_3d_coordinate(p, padding=0.1):
''' Normalize coordinate to [0, 1] for unit cube experiments.
Corresponds to our 3D model
Args:
p (tensor): point
        padding (float): conventional padding parameter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
'''
p_nor = p / (1 + padding + 10e-4) # (-0.5, 0.5)
p_nor = p_nor + 0.5 # range (0, 1)
    # if there are outliers out of the range
if p_nor.max() >= 1:
p_nor[p_nor >= 1] = 1 - 10e-4
if p_nor.min() < 0:
p_nor[p_nor < 0] = 0.0
return p_nor
def normalize_coord(p, vol_range, plane='xz'):
''' Normalize coordinate to [0, 1] for sliding-window experiments
Args:
p (tensor): point
vol_range (numpy array): volume boundary
plane (str): feature type, ['xz', 'xy', 'yz'] - canonical planes; ['grid'] - grid volume
'''
p[:, 0] = (p[:, 0] - vol_range[0][0]) / (vol_range[1][0] - vol_range[0][0])
p[:, 1] = (p[:, 1] - vol_range[0][1]) / (vol_range[1][1] - vol_range[0][1])
p[:, 2] = (p[:, 2] - vol_range[0][2]) / (vol_range[1][2] - vol_range[0][2])
if plane == 'xz':
x = p[:, [0, 2]]
    elif plane == 'xy':
        x = p[:, [0, 1]]
    elif plane == 'yz':
x = p[:, [1, 2]]
else:
x = p
return x
def coordinate2index(x, reso, coord_type='2d'):
    ''' Converts normalized coordinates in [0, 1] to flat plane/grid indices.
Args:
x (tensor): coordinate
reso (int): defined resolution
coord_type (str): coordinate type
'''
x = (x * reso).long()
if coord_type == '2d': # plane
index = x[:, :, 0] + reso * x[:, :, 1]
elif coord_type == '3d': # grid
index = x[:, :, 0] + reso * (x[:, :, 1] + reso * x[:, :, 2])
index = index[:, None, :]
return index
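# Illustrative usage: flat scatter indices for points already normalized to
# [0, 1) on a canonical plane (resolution 64 is an arbitrary example):
#   xy = normalize_coordinate(p, plane='xz')            # (B, N, 2)
#   index = coordinate2index(xy, 64, coord_type='2d')   # -> (B, 1, N)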
def coord2index(p, vol_range, reso=None, plane='xz'):
    ''' Normalizes points to [0, 1] for sliding-window experiments and
    converts them to flat plane/grid indices.
Args:
p (tensor): points
vol_range (numpy array): volume boundary
reso (int): defined resolution
plane (str): feature type, ['xz', 'xy', 'yz'] - canonical planes; ['grid'] - grid volume
'''
# normalize to [0, 1]
x = normalize_coord(p, vol_range, plane=plane)
if isinstance(x, np.ndarray):
x = np.floor(x * reso).astype(int)
    else:  # pytorch tensor
x = (x * reso).long()
if x.shape[1] == 2:
index = x[:, 0] + reso * x[:, 1]
index[index > reso**2] = reso**2
elif x.shape[1] == 3:
index = x[:, 0] + reso * (x[:, 1] + reso * x[:, 2])
index[index > reso**3] = reso**3
return index[None]
def update_reso(reso, depth):
''' Update the defined resolution so that UNet can process.
Args:
reso (int): defined resolution
depth (int): U-Net number of layers
'''
base = 2**(int(depth) - 1)
    if not (reso / base).is_integer():  # a non-multiple resolution causes a U-Net dimension error
for i in range(base):
if ((reso + i) / base).is_integer():
reso = reso + i
break
return reso
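# Worked example: with depth=4 the U-Net downsamples by 2**(4-1) = 8, so a
# resolution of 52 is rounded up to the next multiple of 8:
#   update_reso(52, depth=4)   # -> 56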
def decide_total_volume_range(query_vol_metric, recep_field, unit_size, unet_depth):
    ''' Computes the input/query volume ranges and a U-Net-compatible resolution.
Args:
query_vol_metric (numpy array): query volume size
recep_field (int): defined the receptive field for U-Net
unit_size (float): the defined voxel size
unet_depth (int): U-Net number of layers
'''
reso = query_vol_metric / unit_size + recep_field - 1
reso = update_reso(int(reso), unet_depth) # make sure input reso can be processed by UNet
input_vol_metric = reso * unit_size
p_c = np.array([0.0, 0.0, 0.0]).astype(np.float32)
lb_input_vol, ub_input_vol = p_c - input_vol_metric/2, p_c + input_vol_metric/2
lb_query_vol, ub_query_vol = p_c - query_vol_metric/2, p_c + query_vol_metric/2
input_vol = [lb_input_vol, ub_input_vol]
query_vol = [lb_query_vol, ub_query_vol]
# handle the case when resolution is too large
if reso > 10000:
reso = 1
return input_vol, query_vol, reso
def add_key(base, new, base_name, new_name, device=None):
''' Add new keys to the given input
Args:
base (tensor): inputs
new (tensor): new info for the inputs
base_name (str): name for the input
new_name (str): name for the new info
device (device): pytorch device
'''
if (new is not None) and (isinstance(new, dict)):
if device is not None:
for key in new.keys():
new[key] = new[key].to(device)
base = {base_name: base,
new_name: new}
return base
class map2local(object):
    ''' Maps points to local per-voxel coordinates and applies a positional encoding.
Args:
s (float): the defined voxel size
pos_encoding (str): method for the positional encoding, linear|sin_cos
'''
def __init__(self, s, pos_encoding='linear'):
super().__init__()
self.s = s
self.pe = positional_encoding(basis_function=pos_encoding)
def __call__(self, p):
        p = torch.remainder(p, self.s) / self.s  # always positive
# p = torch.fmod(p, self.s) / self.s # same sign as input p!
p = self.pe(p)
return p
class positional_encoding(object):
''' Positional Encoding (presented in NeRF)
Args:
basis_function (str): basis function
'''
def __init__(self, basis_function='sin_cos'):
super().__init__()
self.func = basis_function
L = 10
freq_bands = 2.**(np.linspace(0, L-1, L))
self.freq_bands = freq_bands * math.pi
def __call__(self, p):
if self.func == 'sin_cos':
out = []
            p = 2.0 * p - 1.0  # change to the range [-1, 1]
for freq in self.freq_bands:
out.append(torch.sin(freq * p))
out.append(torch.cos(freq * p))
p = torch.cat(out, dim=2)
return p
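# Illustrative usage: with the 'sin_cos' basis and L = 10 frequency bands,
# every input dimension expands into 2 * 10 values, so (B, N, 3) points in
# [0, 1] become (B, N, 60) features; any other basis returns p unchanged:
#   pe = positional_encoding(basis_function='sin_cos')
#   pe(torch.rand(2, 100, 3)).shape   # -> torch.Size([2, 100, 60])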
|
NVlabs/ACID/ACID/src/config.py | import yaml
from torchvision import transforms
from src import data
from src import conv_onet
method_dict = {
'conv_onet': conv_onet
}
# General config
def load_config(path, default_path=None):
''' Loads config file.
Args:
path (str): path to config file
default_path (bool): whether to use default path
'''
# Load configuration from file itself
with open(path, 'r') as f:
        cfg_special = yaml.safe_load(f)
# Check if we should inherit from a config
inherit_from = cfg_special.get('inherit_from')
# If yes, load this config first as default
# If no, use the default_path
if inherit_from is not None:
cfg = load_config(inherit_from, default_path)
elif default_path is not None:
with open(default_path, 'r') as f:
            cfg = yaml.safe_load(f)
else:
cfg = dict()
# Include main configuration
update_recursive(cfg, cfg_special)
return cfg
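# Illustrative usage (file names and keys are hypothetical): a config may name
# a parent via `inherit_from`, and its own entries override inherited ones.
#   # configs/base.yaml:        training: {batch_size: 32, epochs: 100}
#   # configs/experiment.yaml:  inherit_from: configs/base.yaml
#   #                           training: {batch_size: 64}
#   cfg = load_config('configs/experiment.yaml')
#   # cfg['training'] -> {'batch_size': 64, 'epochs': 100}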
def update_recursive(dict1, dict2):
''' Update two config dictionaries recursively.
Args:
dict1 (dict): first dictionary to be updated
dict2 (dict): second dictionary which entries should be used
'''
for k, v in dict2.items():
if k not in dict1:
dict1[k] = dict()
if isinstance(v, dict):
update_recursive(dict1[k], v)
else:
dict1[k] = v
# Models
def get_model(cfg, device=None, dataset=None):
''' Returns the model instance.
Args:
cfg (dict): config dictionary
device (device): pytorch device
dataset (dataset): dataset
'''
method = cfg['method']
model = method_dict[method].config.get_model(
cfg, device=device, dataset=dataset)
return model
# Trainer
def get_trainer(model, optimizer, cfg, device):
''' Returns a trainer instance.
Args:
model (nn.Module): the model which is used
optimizer (optimizer): pytorch optimizer
cfg (dict): config dictionary
device (device): pytorch device
'''
method = cfg['method']
trainer = method_dict[method].config.get_trainer(
model, optimizer, cfg, device)
return trainer
# Generator for final mesh extraction
def get_generator(model, cfg, device):
''' Returns a generator instance.
Args:
model (nn.Module): the model which is used
cfg (dict): config dictionary
device (device): pytorch device
'''
method = cfg['method']
generator = method_dict[method].config.get_generator(model, cfg, device)
return generator
|
NVlabs/ACID/ACID/src/__init__.py | |
NVlabs/ACID/ACID/src/checkpoints.py | import os
import urllib
import torch
from torch.utils import model_zoo
class CheckpointIO(object):
''' CheckpointIO class.
It handles saving and loading checkpoints.
Args:
checkpoint_dir (str): path where checkpoints are saved
'''
def __init__(self, checkpoint_dir='./chkpts', **kwargs):
self.module_dict = kwargs
self.checkpoint_dir = checkpoint_dir
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
def register_modules(self, **kwargs):
''' Registers modules in current module dictionary.
'''
self.module_dict.update(kwargs)
def save(self, filename, **kwargs):
''' Saves the current module dictionary.
Args:
filename (str): name of output file
'''
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
outdict = kwargs
for k, v in self.module_dict.items():
outdict[k] = v.state_dict()
torch.save(outdict, filename)
def load(self, filename):
'''Loads a module dictionary from local file or url.
Args:
filename (str): name of saved module dictionary
'''
if is_url(filename):
return self.load_url(filename)
else:
return self.load_file(filename)
def load_file(self, filename):
'''Loads a module dictionary from file.
Args:
filename (str): name of saved module dictionary
'''
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
if os.path.exists(filename):
print(filename)
print('=> Loading checkpoint from local file...')
state_dict = torch.load(filename)
scalars = self.parse_state_dict(state_dict)
return scalars
else:
            raise FileNotFoundError(filename)
def load_url(self, url):
'''Load a module dictionary from url.
Args:
url (str): url to saved model
'''
print(url)
print('=> Loading checkpoint from url...')
state_dict = model_zoo.load_url(url, progress=True)
scalars = self.parse_state_dict(state_dict)
return scalars
def parse_state_dict(self, state_dict):
'''Parse state_dict of model and return scalars.
Args:
state_dict (dict): State dict of model
'''
for k, v in self.module_dict.items():
if k in state_dict:
v.load_state_dict(state_dict[k])
else:
print('Warning: Could not find %s in checkpoint!' % k)
scalars = {k: v for k, v in state_dict.items()
if k not in self.module_dict}
return scalars
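# Illustrative usage (module names are an example): register the objects to
# persist, then save/load them together with arbitrary scalars.
#   checkpoint_io = CheckpointIO('out/model', model=model, optimizer=optimizer)
#   checkpoint_io.save('model.pt', epoch_it=epoch_it, it=it)
#   scalars = checkpoint_io.load('model.pt')   # -> {'epoch_it': ..., 'it': ...}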
def is_url(url):
scheme = urllib.parse.urlparse(url).scheme
return scheme in ('http', 'https') |
NVlabs/ACID/ACID/src/layers.py | import torch
import torch.nn as nn
# Resnet Blocks
class ResnetBlockFC(nn.Module):
''' Fully connected ResNet Block class.
Args:
size_in (int): input dimension
size_out (int): output dimension
size_h (int): hidden dimension
'''
def __init__(self, size_in, size_out=None, size_h=None):
super().__init__()
# Attributes
if size_out is None:
size_out = size_in
if size_h is None:
size_h = min(size_in, size_out)
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
self.fc_0 = nn.Linear(size_in, size_h)
self.fc_1 = nn.Linear(size_h, size_out)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Linear(size_in, size_out, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
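    # Illustrative usage: with differing sizes a learned linear shortcut is
    # added automatically, e.g.
    #   block = ResnetBlockFC(size_in=128, size_out=64)
    #   block(torch.randn(16, 128)).shape   # -> torch.Size([16, 64])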
def forward(self, x):
net = self.fc_0(self.actvn(x))
dx = self.fc_1(self.actvn(net))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx |
NVlabs/ACID/ACID/src/conv_onet/training.py | import os
import numpy as np
import torch
from torch.nn import functional as F
from src.common import compute_iou
from src.utils import common_util, plushsim_util
from src.training import BaseTrainer
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
from collections import defaultdict
from tqdm import tqdm
from src.utils.plushsim_util import find_nn_cpu, find_emd_cpu
class PlushTrainer(BaseTrainer):
    ''' Trainer object for the Occupancy Network.
    Args:
        model (nn.Module): Occupancy Network model
        optimizer (optimizer): pytorch optimizer object
        cfg (dict): config dictionary
        device (device): pytorch device
        vis_dir (str): visualization directory
    '''
    def __init__(self, model, optimizer, cfg, device=None, vis_dir=None):
self.model = model
self.optimizer = optimizer
self.device = device
self.vis_dir = vis_dir
self.threshold = cfg['test']['threshold']
self.pos_weight = torch.FloatTensor([cfg['training']['pos_weight']]).to(device)
if 'corr_dim' in cfg['model']['decoder_kwargs'] and cfg['model']['decoder_kwargs']['corr_dim'] > 0:
self.contrastive_threshold = cfg['loss']['contrastive_threshold']
self.use_geodesics = cfg['loss']['use_geodesics']
self.loss_type = cfg['loss']['type']
self.contrastive_coeff_neg = cfg['loss'].get('contrastive_coeff_neg', 1.)
self.contrastive_neg_thres = cfg['loss'].get('contrastive_neg_thres', 1.)
self.contrastive_coeff_pos = cfg['loss'].get('contrastive_coeff_pos', 1.)
            self.contrastive_pos_thres = cfg['loss'].get('contrastive_pos_thres', 0.1)
self.scale_with_geodesics = cfg['loss'].get('scale_with_geodesics', False)
if vis_dir is not None and not os.path.exists(vis_dir):
os.makedirs(vis_dir)
self.max_thres = 0.2
self.discretization = 1000
self.base_fpr = np.linspace(0,1,101)
self.base_thres = np.linspace(0,self.max_thres,self.discretization)
def train_step(self, data, it):
''' Performs a training step.
Args:
data (dict): data dictionary
'''
self.model.train()
self.optimizer.zero_grad()
losses = self.compute_loss(data, it)
loss = 0
for v in losses.values():
loss += v
loss.backward()
self.optimizer.step()
return {k:v.item() for k,v in losses.items()}
def evaluate(self, val_loader):
''' Performs an evaluation.
Args:
val_loader (dataloader): pytorch dataloader
'''
eval_list = defaultdict(list)
agg_list = defaultdict(list)
for data in tqdm(val_loader):
eval_step_dict, agg_step_dict = self.eval_step(data)
for k, v in eval_step_dict.items():
eval_list[k].append(v)
for k, v in agg_step_dict.items():
agg_list[k].append(v)
eval_dict = {k: np.mean(v) for k, v in eval_list.items()}
# - shape completion ROC
figs = {}
if 'tpr' in agg_list:
figs['OCC_ROC'] = self._get_shape_completion_ROC(agg_list['tpr'])
if 'fmr_hits' in agg_list:
fmr = np.array(agg_list['fmr_hits'])
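            # Map metric distance thresholds onto indices into self.base_thres,
            # which spans [0, max_thres] over `discretization` bins.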
idx01 = int(0.01 * (self.discretization-1) / self.max_thres)
idx02 = int(0.02 * (self.discretization-1) / self.max_thres)
idx05 = int(0.05 * (self.discretization-1) / self.max_thres)
idx10 = int(0.10 * (self.discretization-1) / self.max_thres)
eval_dict['FMR.01m_5%'] = np.mean(fmr[:,idx01] > 0.05)
eval_dict['FMR.02m_5%'] = np.mean(fmr[:,idx02] > 0.05)
eval_dict['FMR.05m_5%'] = np.mean(fmr[:,idx05] > 0.05)
eval_dict['FMR.10m_5%'] = np.mean(fmr[:,idx10] > 0.05)
fmr_std = fmr.std(axis=0)
eval_dict['FMR.01m_5%_std'] = fmr_std[idx01]
eval_dict['FMR.02m_5%_std'] = fmr_std[idx02]
eval_dict['FMR.05m_5%_std'] = fmr_std[idx05]
eval_dict['FMR.10m_5%_std'] = fmr_std[idx10]
for tau2 in np.linspace(0.01,0.2,5):
figs[f'FMR_tau1_wrt_tau2={tau2:.3f}']= self._get_FMR_curve_tau1(fmr, tau2=tau2)
figs['FMR_tau1']= self._get_FMR_curve_tau1(fmr)
for tau1 in np.linspace(0.01,0.1,5):
figs[f'FMR_tau2_wrt_tau1={tau1:.3f}']= self._get_FMR_curve_tau2(fmr, tau1=tau1)
if 'pair_dist' in agg_list:
all_dists = np.concatenate(agg_list['pair_dist'])
eval_dict['pair_dist'] = all_dists.mean()
eval_dict['pair_dist_std'] = all_dists.std()
figs['dist_hist'] = self._get_pair_distance_histogram(all_dists)
return eval_dict, figs
def _get_pair_distance_histogram(self, all_dists):
fig, ax = plt.subplots(figsize=(10,7))
counts, bins, patches = ax.hist(all_dists, density=True, bins=40) # density=False would make counts
ax.set_ylabel('Density')
ax.set_xlabel('Pair Distance')
return fig
def _get_shape_completion_ROC(self, tpr):
tprs = np.array(tpr)
mean_tprs = tprs.mean(axis=0)
std = tprs.std(axis=0)
tprs_upper = np.minimum(mean_tprs + std, 1)
tprs_lower = np.maximum(mean_tprs - std, 0)
fig, ax = plt.subplots(figsize=(10,7))
ax.plot(self.base_fpr, mean_tprs, 'b')
ax.fill_between(self.base_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.3)
ax.plot([0, 1], [0, 1],'r--')
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.0])
ax.set_ylabel('True Positive Rate')
ax.set_xlabel('False Positive Rate')
return fig
def _get_FMR_curve_tau2(self, fmrs, tau1=0.1):
idx05 = int(tau1 * (self.discretization-1) / self.max_thres)
# fix tau 1
means = []
tau1_min = 0.001
tau1_max = 0.25
tau1_ticks = np.linspace(tau1_min, tau1_max, 1000)
for t in tau1_ticks:
means.append(np.mean(fmrs[:,idx05] > t, axis=0))
fig, ax = plt.subplots(figsize=(10,7))
ax.plot(tau1_ticks, means, 'b')
ax.set_xlim([tau1_min, tau1_max])
ax.set_ylim([0.0, 1.0])
ax.set_ylabel('Feature Match Recall')
ax.set_xlabel('Inlier Ratio threshold')
return fig
def _get_FMR_curve_tau1(self, fmrs, tau2=0.05):
# tau2 = 0.05 is the inlier ratio
# fix tau 2
mean_fmrs = np.mean(fmrs > tau2, axis=0)
fig, ax = plt.subplots(figsize=(10,7))
ax.plot(self.base_thres, mean_fmrs, 'b')
ax.set_xlim([0.0, self.max_thres])
ax.set_ylim([0.0, 1.0])
ax.set_ylabel('Feature Match Recall')
ax.set_xlabel('Inlier Distance Threshold')
return fig
def eval_step(self, data):
''' Performs an evaluation step.
Args:
data (dict): data dictionary
'''
self.model.eval()
device = self.device
for k,v in data.items():
data[k] = v.to(device)
eval_dict = {}
agg = {}
idx = data['idx'].item()
# Compute iou
with torch.no_grad():
outputs = self.model(data)
gt_occ = data['sampled_occ']
B,_,N = gt_occ.shape
gt_occ = gt_occ.reshape([B*2, N])
occ_iou_np = (gt_occ >= 0.5).cpu().numpy()
occ_iou_hat_np = (outputs['occ'].probs >= self.threshold).cpu().numpy()
iou = compute_iou(occ_iou_np, occ_iou_hat_np).mean()
eval_dict['iou'] = iou
eval_dict[f'iou_{self.threshold}'] = iou
occ_iou_hat_np_2 = (outputs['occ'].probs >= 0.5).cpu().numpy()
iou = compute_iou(occ_iou_np, occ_iou_hat_np_2).mean()
eval_dict['iou_0.5'] = iou
intermediate = (self.threshold + 0.5) / 2
occ_iou_hat_np_3 = (outputs['occ'].probs >= intermediate).cpu().numpy()
iou = compute_iou(occ_iou_np, occ_iou_hat_np_3).mean()
eval_dict[f'iou_{intermediate}'] = iou
if 'flow' in outputs:
gt_flow = data['sampled_flow']
gt_flow = gt_flow.reshape([B*2, N, 3])
            constant = torch.from_numpy(np.array((12.,12.,4.)) / 10. / (1.1,1.1,1.1)).float().to(device)
loss_flow = F.mse_loss(
outputs['flow'] * constant,
gt_flow * constant,
reduction='none')
eval_dict['flow_all_field'] = loss_flow.sum(-1).mean().item()
loss_flow_np = loss_flow.sum(-1).cpu().numpy()
loss_flow_pos = loss_flow_np[occ_iou_np]
# if empty scene, no flow of the object will be present
if len(loss_flow_pos) > 0:
eval_dict['flow'] = loss_flow_pos.mean()
gt_pts = data['sampled_pts'].reshape([B*2, N, 3]).cpu().numpy()
if 'flow' in outputs:
flow_vis_mean = []
for i in range(B*2):
gt_occ_pts = gt_pts[i][occ_iou_np[i]] * (1200, 1200, 400) / (1.1,1.1,1.1) + (0,0,180)
vis_idx = plushsim_util.render_points(gt_occ_pts,
plushsim_util.CAM_EXTR,
plushsim_util.CAM_INTR,
return_index=True)
vis_pts = gt_occ_pts[vis_idx]
flow_vis_mean.append(loss_flow_np[i][occ_iou_np[i]][vis_idx].mean())
eval_dict['flow_only_vis'] = np.mean(flow_vis_mean)
if idx % 10000 == 9999:
# do expensive evaluations
# occupancy ROC curve
fpr, tpr, _ = roc_curve(occ_iou_np.flatten(),
outputs['occ'].probs.cpu().numpy().flatten())
base_fpr = np.linspace(0, 1, 101)
            tpr = np.interp(base_fpr, fpr, tpr)
tpr[0] = 0.0
agg['tpr'] = tpr
f1 = []
for i in range(B*2):
gt_occ_pts = common_util.subsample_points(gt_pts[i][occ_iou_np[i]], return_index=False)
pred_pts = common_util.subsample_points(gt_pts[i][occ_iou_hat_np[i]], return_index=False)
f1.append(common_util.f1_score(pred_pts, gt_occ_pts))
f1 = np.array(f1)
f1score, precision, recall = f1.mean(axis=0)
eval_dict['f1'] = f1score
eval_dict['precision'] = precision
eval_dict['recall'] = recall
if 'corr' in outputs:
# data prep corr
corr_f = outputs['corr']
num_pairs = corr_f.shape[1]
gt_match = np.arange(num_pairs)
src_f = corr_f[0].cpu().numpy()
tgt_f = corr_f[1].cpu().numpy()
# data prep pts
pts = data['sampled_pts'].cpu().numpy().squeeze()
src_pts = pts[0][:num_pairs] * (12,12,4) / (1.1,1.1,1.1)
tgt_pts = pts[1][:num_pairs] * (12,12,4) / (1.1,1.1,1.1)
# normalize points to maximum length of 1.
tgt_pts = tgt_pts / np.ptp(tgt_pts, axis=0).max()
_, nn_inds_st = find_emd_cpu(src_f, tgt_f)
# doing Feature-match recall.
eval_dict['match_exact'] = np.mean(gt_match == nn_inds_st)
dist_st = np.linalg.norm(tgt_pts - tgt_pts[nn_inds_st], axis=1)
eval_dict['match_0.05'] = np.mean(dist_st < 0.05)
eval_dict['match_0.1'] = np.mean(dist_st < 0.1)
hits = np.array([np.mean(dist_st < f) for f in self.base_thres])
agg['fmr_hits'] = hits
agg['pair_dist'] = dist_st
return eval_dict, agg
def compute_loss(self, data, it):
''' Computes the loss.
Args:
data (dict): data dictionary
'''
device = self.device
for k,v in data.items():
data[k] = v.to(device)
outputs = self.model(data)
loss = {}
eval_dict = {}
# Occupancy Loss
if 'occ' in outputs:
# gt points
gt_occ = data['sampled_occ']
B,_,N = gt_occ.shape
gt_occ = gt_occ.reshape([B*2, N])
occ_iou_np = (gt_occ >= 0.5).cpu().numpy()
# pred
logits = outputs['occ'].logits
loss_i = F.binary_cross_entropy_with_logits(
logits, gt_occ, reduction='none', pos_weight=self.pos_weight)
loss['occ'] = loss_i.mean()
# eval infos
occ_iou_hat_np = (outputs['occ'].probs >= self.threshold).cpu().numpy()
iou = compute_iou(occ_iou_np, occ_iou_hat_np).mean()
eval_dict['iou'] = iou
        if 'flow' in outputs:
gt_occ = data['sampled_occ']
B,_,N = gt_occ.shape
gt_occ = gt_occ.reshape([B*2, N])
mask = (gt_occ > 0.5).bool()
gt_flow = data['sampled_flow']
gt_flow = gt_flow.reshape([B*2, N, 3])
flow_gt_0 = gt_flow[~mask]
flow_gt_1 = gt_flow[mask]
flow_pred = outputs['flow']
flow_pred_0 = flow_pred[~mask]
flow_pred_1 = flow_pred[mask]
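            # Supervise flow fully on occupied points and down-weight the
            # free-space term by 0.01 so empty regions do not dominate.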
loss['flow'] = F.mse_loss(flow_pred_1, flow_gt_1) + 0.01 * F.mse_loss(flow_pred_0, flow_gt_0)
if 'corr' in outputs:
dist_vec = data['geo_dists']
corr_f = outputs['corr']
src_f = corr_f[0]
src_pos = src_f[dist_vec <= self.contrastive_threshold]
num_positive = (dist_vec <= self.contrastive_threshold).sum()
tgt_f = corr_f[1]
tgt_pos = tgt_f[dist_vec <= self.contrastive_threshold]
if self.loss_type == "contrastive":
if num_positive > 0:
src_neg = src_f[dist_vec > self.contrastive_threshold]
tgt_neg = tgt_f[dist_vec > self.contrastive_threshold]
# Positive loss
pos_loss = F.relu(((src_pos - tgt_pos).pow(2).sum(1) + 1e-4).sqrt()
- self.contrastive_pos_thres).pow(2)
pos_loss_mean = pos_loss.mean()
loss['contrastive_pos'] = self.contrastive_coeff_pos * pos_loss_mean
# Negative loss
neg_dist = (dist_vec[dist_vec > self.contrastive_threshold]
/ self.contrastive_threshold).log() + 1.
neg_dist = torch.clamp(neg_dist, max=2)
neg_loss = F.relu(neg_dist -
((src_neg - tgt_neg).pow(2).sum(1) + 1e-4).sqrt()).pow(2)
if self.scale_with_geodesics:
neg_loss = neg_loss / neg_dist
neg_loss_mean = neg_loss.mean()
loss['contrastive_neg'] = self.contrastive_coeff_neg * neg_loss_mean
return loss
|
NVlabs/ACID/ACID/src/conv_onet/config.py | import os
from src.encoder import encoder_dict
from src.conv_onet import models, training
from src.conv_onet import generation
from src import data
def get_model(cfg, device=None, dataset=None, **kwargs):
if cfg['model']['type'] == 'geom':
return get_geom_model(cfg,device,dataset)
elif cfg['model']['type'] == 'combined':
return get_combined_model(cfg,device,dataset)
def get_combined_model(cfg, device=None, dataset=None, **kwargs):
''' Return the Occupancy Network model.
Args:
cfg (dict): imported yaml config
device (device): pytorch device
dataset (dataset): dataset
'''
dim = cfg['data']['dim']
act_dim = cfg['data']['act_dim']
obj_c_dim = cfg['model']['obj_c_dim']
decoder_kwargs = cfg['model']['decoder_kwargs']
obj_encoder_kwargs = cfg['model']['obj_encoder_kwargs']
padding = cfg['data']['padding']
decoder = 'combined_decoder'
encoder = 'geom_encoder'
    if 'env_c_dim' in cfg['model'] and cfg['model']['env_c_dim'] != 0:
env_c_dim = cfg['model']['env_c_dim']
env_encoder_kwargs = cfg['model']['env_encoder_kwargs']
env_encoder = encoder_dict[encoder](
dim=dim, c_dim=env_c_dim, padding=padding,
**env_encoder_kwargs
)
else:
env_c_dim = 0
        env_encoder = None
decoder = models.decoder_dict[decoder](
dim=dim,
c_per_dim=obj_c_dim+env_c_dim,
c_act_dim=obj_c_dim+env_c_dim,
padding=padding,
**decoder_kwargs
)
obj_per_encoder = encoder_dict[encoder](
dim=dim, c_dim=obj_c_dim, padding=padding,
**obj_encoder_kwargs
)
obj_act_encoder = encoder_dict[encoder](
dim=act_dim, c_dim=obj_c_dim, padding=padding,
**obj_encoder_kwargs
)
model = models.ConvImpDyn(
obj_per_encoder, obj_act_encoder, env_encoder, decoder, device=device
)
return model
def get_geom_model(cfg, device=None, dataset=None, **kwargs):
''' Return the Occupancy Network model.
Args:
cfg (dict): imported yaml config
device (device): pytorch device
dataset (dataset): dataset
'''
dim = cfg['data']['dim']
obj_c_dim = cfg['model']['obj_c_dim']
decoder_kwargs = cfg['model']['decoder_kwargs']
obj_encoder_kwargs = cfg['model']['obj_encoder_kwargs']
padding = cfg['data']['padding']
decoder = 'geom_decoder'
encoder = 'geom_encoder'
    if 'env_c_dim' in cfg['model'] and cfg['model']['env_c_dim'] != 0:
env_c_dim = cfg['model']['env_c_dim']
env_encoder_kwargs = cfg['model']['env_encoder_kwargs']
env_encoder = encoder_dict[encoder](
dim=dim, c_dim=env_c_dim, padding=padding,
**env_encoder_kwargs
)
else:
env_c_dim = 0
        env_encoder = None
decoder = models.decoder_dict[decoder](
dim=dim, c_dim=obj_c_dim+env_c_dim, padding=padding,
**decoder_kwargs
)
obj_encoder = encoder_dict[encoder](
dim=dim, c_dim=obj_c_dim, padding=padding,
**obj_encoder_kwargs
)
model = models.ConvOccGeom(
obj_encoder, env_encoder, decoder, device=device
)
return model
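# Illustrative config fragment (keys inferred from the accessors above; the
# values are placeholders, not the released training settings):
#   data:  {dim: 3, act_dim: 6, padding: 0.1}
#   model:
#     type: combined              # or 'geom'
#     obj_c_dim: 32
#     env_c_dim: 32               # optional; omit or set 0 to disable the env encoder
#     decoder_kwargs: {...}
#     obj_encoder_kwargs: {...}
#     env_encoder_kwargs: {...}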
def get_trainer(model, optimizer, cfg, device, **kwargs):
''' Returns the trainer object.
Args:
model (nn.Module): the Occupancy Network model
optimizer (optimizer): pytorch optimizer object
cfg (dict): imported yaml config
device (device): pytorch device
'''
out_dir = cfg['training']['out_dir']
vis_dir = os.path.join(out_dir, 'vis')
trainer = training.PlushTrainer(
model, optimizer, cfg,
device=device,
vis_dir=vis_dir )
return trainer
def get_generator(model, cfg, device, **kwargs):
''' Returns the generator object.
Args:
model (nn.Module): Occupancy Network model
cfg (dict): imported yaml config
device (device): pytorch device
'''
generator = generation.Generator3D(
model,
device=device,
threshold=cfg['test']['threshold'],
resolution0=cfg['generation']['resolution_0'],
upsampling_steps=cfg['generation']['upsampling_steps'],
sample=cfg['generation']['use_sampling'],
refinement_step=cfg['generation']['refinement_step'],
simplify_nfaces=cfg['generation']['simplify_nfaces'],
padding=cfg['data']['padding'],
vol_info = None,
vol_bound = None,
)
return generator
|
NVlabs/ACID/ACID/src/conv_onet/__init__.py | from src.conv_onet import (
config, generation, training, models
)
__all__ = [
    'config', 'generation', 'training', 'models'
]
|
NVlabs/ACID/ACID/src/conv_onet/generation.py | import torch
import torch.optim as optim
from torch import autograd
import numpy as np
from tqdm import trange, tqdm
import trimesh
from src.utils import libmcubes, common_util
from src.common import make_3d_grid, normalize_coord, add_key, coord2index
from src.utils.libmise import MISE
import time
import math
counter = 0
class Generator3D(object):
''' Generator class for Occupancy Networks.
It provides functions to generate the final mesh as well refining options.
Args:
model (nn.Module): trained Occupancy Network model
points_batch_size (int): batch size for points evaluation
threshold (float): threshold value
refinement_step (int): number of refinement steps
device (device): pytorch device
resolution0 (int): start resolution for MISE
        upsampling_steps (int): number of upsampling steps
with_normals (bool): whether normals should be estimated
padding (float): how much padding should be used for MISE
sample (bool): whether z should be sampled
input_type (str): type of input
        vol_info (dict): volume information
vol_bound (dict): volume boundary
simplify_nfaces (int): number of faces the mesh should be simplified to
'''
def __init__(self, model, points_batch_size=100000,
threshold=0.5, refinement_step=0, device=None,
resolution0=16, upsampling_steps=3,
with_normals=False, padding=0.1, sample=False,
input_type = None,
vol_info = None,
vol_bound = None,
simplify_nfaces=None):
self.model = model.to(device)
self.points_batch_size = points_batch_size
self.refinement_step = refinement_step
self.threshold = threshold
self.device = device
self.resolution0 = resolution0
self.upsampling_steps = upsampling_steps
self.with_normals = with_normals
self.input_type = input_type
self.padding = padding
self.sample = sample
self.simplify_nfaces = simplify_nfaces
# for pointcloud_crop
self.vol_bound = vol_bound
if vol_info is not None:
self.input_vol, _, _ = vol_info
def generate_mesh(self, data, return_stats=True):
''' Generates the output mesh.
Args:
data (tensor): data tensor
return_stats (bool): whether stats should be returned
'''
self.model.eval()
device = self.device
for k,v in data.items():
data[k] = v.to(device)
stats_dict = {}
t0 = time.time()
# obtain features for all crops
with torch.no_grad():
c = self.model.encode_inputs(data)
if type(c) is tuple:
for cs in c:
for k,v in cs.items():
cs[k] = v[0].unsqueeze(0)
else:
for k,v in c.items():
c[k] = v[0].unsqueeze(0)
stats_dict['time (encode inputs)'] = time.time() - t0
mesh = self.generate_from_latent(c, stats_dict=stats_dict)
if return_stats:
return mesh, stats_dict
else:
return mesh
def generate_from_latent(self, c=None, stats_dict={}, **kwargs):
''' Generates mesh from latent.
Works for shapes normalized to a unit cube
Args:
c (tensor): latent conditioned code c
stats_dict (dict): stats dictionary
'''
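        # eval_points returns raw logits, so convert the probability threshold
        # into logit space once up front.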
threshold = np.log(self.threshold) - np.log(1. - self.threshold)
t0 = time.time()
# Compute bounding box size
box_size = 1 + self.padding
# Shortcut
if self.upsampling_steps == 0:
nx = self.resolution0
pointsf = box_size * make_3d_grid(
(-0.5,)*3, (0.5,)*3, (nx,)*3
)
values = self.eval_points(pointsf, c, **kwargs).cpu().numpy()
value_grid = values.reshape(nx, nx, nx)
else:
mesh_extractor = MISE(
self.resolution0, self.upsampling_steps, threshold)
points = mesh_extractor.query()
while points.shape[0] != 0:
# Query points
pointsf = points / mesh_extractor.resolution
# Normalize to bounding box
pointsf = box_size * (pointsf - 0.5)
pointsf = torch.FloatTensor(pointsf).to(self.device)
# Evaluate model and update
values = self.eval_points(pointsf, c, **kwargs).cpu().numpy()
values = values.astype(np.float64)
mesh_extractor.update(points, values)
points = mesh_extractor.query()
value_grid = mesh_extractor.to_dense()
# Extract mesh
stats_dict['time (eval points)'] = time.time() - t0
mesh = self.extract_mesh(value_grid, c, stats_dict=stats_dict)
return mesh
def eval_points(self, p, c=None, vol_bound=None, **kwargs):
''' Evaluates the occupancy values for the points.
Args:
p (tensor): points
c (tensor): encoded feature volumes
'''
p_split = torch.split(p, self.points_batch_size)
occ_hats = []
for pi in p_split:
pi = pi.unsqueeze(0).to(self.device)
with torch.no_grad():
occ_hat = self.model.eval_points(pi, c, **kwargs)['occ'].logits
occ_hats.append(occ_hat.squeeze(0).detach().cpu())
occ_hat = torch.cat(occ_hats, dim=0)
return occ_hat
def extract_mesh(self, occ_hat, c=None, stats_dict=dict()):
''' Extracts the mesh from the predicted occupancy grid.
Args:
occ_hat (tensor): value grid of occupancies
c (tensor): encoded feature volumes
stats_dict (dict): stats dictionary
'''
# Some short hands
n_x, n_y, n_z = occ_hat.shape
box_size = 1 + self.padding
threshold = np.log(self.threshold) - np.log(1. - self.threshold)
# Make sure that mesh is watertight
t0 = time.time()
occ_hat_padded = np.pad(
occ_hat, 1, 'constant', constant_values=-1e6)
vertices, triangles = libmcubes.marching_cubes(
occ_hat_padded, threshold)
stats_dict['time (marching cubes)'] = time.time() - t0
# Strange behaviour in libmcubes: vertices are shifted by 0.5
vertices -= 0.5
# # Undo padding
vertices -= 1
if self.vol_bound is not None:
# Scale the mesh back to its original metric
bb_min = self.vol_bound['query_vol'][:, 0].min(axis=0)
bb_max = self.vol_bound['query_vol'][:, 1].max(axis=0)
mc_unit = max(bb_max - bb_min) / (self.vol_bound['axis_n_crop'].max() * self.resolution0*2**self.upsampling_steps)
vertices = vertices * mc_unit + bb_min
else:
# Normalize to bounding box
vertices /= np.array([n_x-1, n_y-1, n_z-1])
vertices = box_size * (vertices - 0.5)
# Create mesh
mesh = trimesh.Trimesh(vertices / (1., 1., 3), triangles,
vertex_normals=None,
process=False)
# Directly return if mesh is empty
if vertices.shape[0] == 0:
return mesh
# TODO: normals are lost here
if self.simplify_nfaces is not None:
t0 = time.time()
from src.utils.libsimplify import simplify_mesh
mesh = simplify_mesh(mesh, self.simplify_nfaces, 5.)
stats_dict['time (simplify)'] = time.time() - t0
# Refine mesh
if self.refinement_step > 0:
t0 = time.time()
self.refine_mesh(mesh, occ_hat, c)
stats_dict['time (refine)'] = time.time() - t0
return mesh
def generate_pointcloud(self, data, threshold=0.75, use_gt_occ=False):
        self.model.eval()
        device = self.device
for k,v in data.items():
data[k] = v.to(device)
stats_dict = {}
t0 = time.time()
# obtain features for all crops
with torch.no_grad():
c = self.model.encode_inputs(data)
pts = data['sampled_pts']
B,_,N,C = pts.shape
pts = pts.reshape([B*2,N,C])
        p_split = torch.split(pts, self.points_batch_size, dim=1)  # split along the point dimension, matching the cat along dim=1 below
occ_hats = []
features = []
flows = []
for pi in p_split:
with torch.no_grad():
outputs = self.model.eval_points(pi, c)
occ_hats.append((outputs['occ'].probs > threshold).detach().cpu())
if 'corr' in outputs:
features.append(outputs['corr'].detach().cpu())
if 'flow' in outputs:
flows.append(outputs['flow'].detach().cpu())
pts = pts.cpu().numpy()
occ_hat = torch.cat(occ_hats, dim=1).numpy()
if use_gt_occ:
occ_hat = data['sampled_occ'].reshape([B*2, N]).cpu().numpy()
pos_pts0 = pts[0][occ_hat[0] == 1.].reshape((-1,3))
pos_idx0 = common_util.subsample_points(pos_pts0, resolution=0.013)
pos_pts0 = pos_pts0[pos_idx0]
pos_pts1 = pts[1][occ_hat[1] == 1.].reshape((-1,3))
pos_idx1 = common_util.subsample_points(pos_pts1, resolution=0.013)
pos_pts1 = pos_pts1[pos_idx1]
pos_pts = np.concatenate([pos_pts0, pos_pts1], axis=0) / (1.,1.,3.)
if len(features) != 0:
feature = torch.cat(features, dim=1).numpy()
f_dim = feature.shape[-1]
pos_f0 = feature[0][occ_hat[0] == 1.].reshape((-1,f_dim))
pos_f1 = feature[1][occ_hat[1] == 1.].reshape((-1,f_dim))
pos_f0 = pos_f0[pos_idx0]
pos_f1 = pos_f1[pos_idx1]
pos_f = np.concatenate([pos_f0, pos_f1], axis=0)
if pos_f.shape[0] < 100:
pcloud_both = pos_pts
else:
tsne_result = common_util.embed_tsne(pos_f)
colors = common_util.get_color_map(tsne_result)
pcloud_both = np.concatenate([pos_pts, colors], axis=1)
else:
pcloud_both = pos_pts
pcloud0 = pcloud_both[:pos_pts0.shape[0]]
pcloud1 = pcloud_both[pos_pts0.shape[0]:]
if len(flows) != 0:
flow = torch.cat(flows, dim=1).numpy() / 10.
pos_f0 = flow[0][occ_hat[0] == 1.].reshape((-1,3))
pos_f1 = flow[1][occ_hat[1] == 1.].reshape((-1,3))
pos_f0 = pos_f0[pos_idx0]
pos_f1 = pos_f1[pos_idx1]
pcloud_unroll_0 = pcloud0.copy()
pcloud_unroll_0[:,:3] += pos_f0 / (1.,1.,3.)
pcloud_unroll_1 = pcloud1.copy()
pcloud_unroll_1[:,:3] += pos_f1 / (1.,1.,3.)
return pcloud0, pcloud1,pcloud_unroll_0,pcloud_unroll_1
return pcloud0, pcloud1
def refine_mesh(self, mesh, occ_hat, c=None):
''' Refines the predicted mesh.
Args:
mesh (trimesh object): predicted mesh
occ_hat (tensor): predicted occupancy grid
c (tensor): latent conditioned code c
'''
self.model.eval()
# Some shorthands
n_x, n_y, n_z = occ_hat.shape
assert(n_x == n_y == n_z)
# threshold = np.log(self.threshold) - np.log(1. - self.threshold)
threshold = self.threshold
# Vertex parameter
v0 = torch.FloatTensor(mesh.vertices).to(self.device)
v = torch.nn.Parameter(v0.clone())
# Faces of mesh
faces = torch.LongTensor(mesh.faces).to(self.device)
# Start optimization
optimizer = optim.RMSprop([v], lr=1e-4)
for it_r in trange(self.refinement_step):
optimizer.zero_grad()
# Loss
face_vertex = v[faces]
eps = np.random.dirichlet((0.5, 0.5, 0.5), size=faces.shape[0])
eps = torch.FloatTensor(eps).to(self.device)
face_point = (face_vertex * eps[:, :, None]).sum(dim=1)
face_v1 = face_vertex[:, 1, :] - face_vertex[:, 0, :]
face_v2 = face_vertex[:, 2, :] - face_vertex[:, 1, :]
face_normal = torch.cross(face_v1, face_v2)
face_normal = face_normal / \
(face_normal.norm(dim=1, keepdim=True) + 1e-10)
face_value = torch.sigmoid(
self.model.eval_points(face_point.unsqueeze(0), c)['occ'].logits
)
normal_target = -autograd.grad(
[face_value.sum()], [face_point], create_graph=True)[0]
normal_target = \
normal_target / \
(normal_target.norm(dim=1, keepdim=True) + 1e-10)
loss_target = (face_value - threshold).pow(2).mean()
loss_normal = \
(face_normal - normal_target).pow(2).sum(dim=1).mean()
loss = loss_target + 0.01 * loss_normal
# Update
loss.backward()
optimizer.step()
mesh.vertices = v.data.cpu().numpy()
return mesh
def generate_occ_grid(self, c=None, stats_dict={}, **kwargs):
        ''' Generates a dense occupancy value grid from latent.
Works for shapes normalized to a unit cube
Args:
c (tensor): latent conditioned code c
stats_dict (dict): stats dictionary
'''
threshold = np.log(self.threshold) - np.log(1. - self.threshold)
t0 = time.time()
# Compute bounding box size
box_size = 1 + self.padding
# Shortcut
if self.upsampling_steps == 0:
nx = self.resolution0
pointsf = box_size * make_3d_grid(
(-0.5,)*3, (0.5,)*3, (nx,)*3
)
values = self.eval_points(pointsf, c, **kwargs).cpu().numpy()
value_grid = values.reshape(nx, nx, nx)
else:
mesh_extractor = MISE(
self.resolution0, self.upsampling_steps, threshold)
points = mesh_extractor.query()
while points.shape[0] != 0:
# Query points
pointsf = points / mesh_extractor.resolution
# Normalize to bounding box
pointsf = box_size * (pointsf - 0.5)
pointsf = torch.FloatTensor(pointsf).to(self.device)
# Evaluate model and update
values = self.eval_points(pointsf, c, **kwargs).cpu().numpy()
values = values.astype(np.float64)
mesh_extractor.update(points, values)
points = mesh_extractor.query()
value_grid = mesh_extractor.to_dense()
return value_grid
|
NVlabs/ACID/ACID/src/conv_onet/models/decoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from src.layers import ResnetBlockFC
from src.common import normalize_coordinate, normalize_3d_coordinate, map2local
class GeomDecoder(nn.Module):
''' Decoder.
Instead of conditioning on global features, on plane/volume local features.
Args:
dim (int): input dimension
        c_dim (int): dimension of latent conditioned code c
        corr_dim (int): dimension of the correspondence feature output (0 disables it)
        corr_head (bool): whether to predict correspondence features with a separate linear head
        hidden_size (int): hidden size of Decoder network
        n_blocks (int): number of ResnetBlockFC blocks
        leaky (bool): whether to use leaky ReLUs
        sample_mode (str): sampling feature strategy, bilinear|nearest
        padding (float): conventional padding parameter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
'''
def __init__(self, dim=3, c_dim=128,
corr_dim=0, corr_head=True,
hidden_size=256, n_blocks=5, leaky=False,
sample_mode='bilinear', padding=0.1):
super().__init__()
self.c_dim = c_dim
self.n_blocks = n_blocks
self.corr_dim = corr_dim
self.corr_head = corr_head
self.fc_c_occ = nn.ModuleList([
nn.Linear(c_dim, hidden_size) for i in range(n_blocks)
])
self.fc_p = nn.Linear(dim, hidden_size)
self.blocks_occ = nn.ModuleList([
ResnetBlockFC(hidden_size) for i in range(n_blocks)
])
self.fc_occ = nn.Linear(hidden_size, 1)
if self.corr_dim != 0 and corr_head:
self.fc_out_corr = nn.Linear(hidden_size, corr_dim)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
self.sample_mode = sample_mode
self.padding = padding
def sample_plane_feature(self, p, c, plane='xz'):
xy = normalize_coordinate(p.clone(), plane=plane, padding=self.padding) # normalize to the range of (0, 1)
xy = xy[:, :, None].float()
vgrid = 2.0 * xy - 1.0 # normalize to (-1, 1)
c = F.grid_sample(c, vgrid, padding_mode='border', align_corners=True, mode=self.sample_mode).squeeze(-1)
return c
def forward(self, p, c_plane, **kwargs):
c = 0
c += self.sample_plane_feature(p, c_plane['xz'], plane='xz')
c += self.sample_plane_feature(p, c_plane['xy'], plane='xy')
c += self.sample_plane_feature(p, c_plane['yz'], plane='yz')
c = c.transpose(1, 2)
p = p.float()
x = self.fc_p(p)
net = x
for i in range(self.n_blocks):
net = net + self.fc_c_occ[i](c)
net = self.blocks_occ[i](net)
results = {}
if self.corr_dim != 0 and not self.corr_head:
results['corr'] = net
net = self.actvn(net)
results['occ'] = self.fc_occ(net).squeeze(-1)
if self.corr_dim != 0 and self.corr_head:
results['corr'] = self.fc_out_corr(net)
return results
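# Illustrative usage (shapes are an example): c_plane holds the three plane
# feature maps produced by the encoder, sampled at the query points p.
#   dec = GeomDecoder(dim=3, c_dim=64, corr_dim=32)
#   c_plane = {k: torch.rand(2, 64, 128, 128) for k in ('xz', 'xy', 'yz')}
#   out = dec(torch.rand(2, 1024, 3) - 0.5, c_plane)
#   # out['occ']: (2, 1024) logits, out['corr']: (2, 1024, 32) features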
class CombinedDecoder(nn.Module):
''' Decoder.
Instead of conditioning on global features, on plane/volume local features.
Args:
dim (int): input dimension
        c_per_dim (int): dimension of the latent perception code
        c_act_dim (int): dimension of the latent action code
        corr_dim (int): dimension of the correspondence feature output (0 disables it)
        corr_head (bool): whether to predict correspondence features with a separate linear head
        hidden_size (int): hidden size of Decoder network
        n_blocks (int): number of ResnetBlockFC blocks
        leaky (bool): whether to use leaky ReLUs
        sample_mode (str): sampling feature strategy, bilinear|nearest
        padding (float): conventional padding parameter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
'''
def __init__(self, dim=3, c_per_dim=128, c_act_dim=128,
corr_dim=0, corr_head=True,
hidden_size=256, n_blocks=5, leaky=False,
sample_mode='bilinear', padding=0.1, fuse=True, detach=False, anneal_gradient=True):
super().__init__()
self.c_per_dim = c_per_dim
self.c_act_dim = c_act_dim
self.n_blocks = n_blocks
self.corr_dim = corr_dim
self.corr_head = corr_head
self.fuse = fuse
self.detach = detach
self.anneal_gradient = anneal_gradient
self.fc_c_per = nn.ModuleList([
nn.Linear(c_per_dim, hidden_size) for i in range(n_blocks)
])
self.fc_c_act = nn.ModuleList([
nn.Linear(c_act_dim, hidden_size) for i in range(n_blocks)
])
if self.fuse:
self.fc_c_merge = nn.ModuleList([
nn.Linear(hidden_size*2, hidden_size) for i in range(n_blocks)
])
self.fc_p_per = nn.Linear(dim, hidden_size)
self.fc_p_act = nn.Linear(dim, hidden_size)
self.blocks_per = nn.ModuleList([
ResnetBlockFC(hidden_size) for i in range(n_blocks)
])
self.blocks_act = nn.ModuleList([
ResnetBlockFC(hidden_size) for i in range(n_blocks)
])
self.fc_occ = nn.Linear(hidden_size, 1)
self.fc_flow= nn.Linear(hidden_size, 3)
if self.corr_dim != 0 and corr_head:
self.fc_out_corr = nn.Linear(hidden_size, corr_dim)
if self.fuse:
self.fc_act_corr_merge = nn.Linear(hidden_size+corr_dim, hidden_size)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
self.sample_mode = sample_mode
self.padding = padding
def sample_plane_feature(self, p, c, plane='xz'):
xy = normalize_coordinate(p.clone(), plane=plane, padding=self.padding) # normalize to the range of (0, 1)
xy = xy[:, :, None].float()
vgrid = 2.0 * xy - 1.0 # normalize to (-1, 1)
c = F.grid_sample(c, vgrid, padding_mode='border', align_corners=True, mode=self.sample_mode).squeeze(-1)
return c
def decode_perception(self, p, c_per_plane):
c_per = 0
c_per += self.sample_plane_feature(p, c_per_plane['xz'], plane='xz')
c_per += self.sample_plane_feature(p, c_per_plane['xy'], plane='xy')
c_per += self.sample_plane_feature(p, c_per_plane['yz'], plane='yz')
c_per = c_per.transpose(1, 2)
p = p.float()
net_per = self.fc_p_per(p)
features = []
for i in range(self.n_blocks):
net_per = net_per + self.fc_c_per[i](c_per)
net_per = self.blocks_per[i](net_per)
if self.detach:
features.append(net_per.detach())
else:
features.append(net_per)
net_per = self.actvn(net_per)
results = {}
results['occ'] = self.fc_occ(net_per).squeeze(-1)
if self.corr_dim != 0 and self.corr_head:
corr = self.fc_out_corr(net_per)
features.append(corr)
results['corr'] = corr
# if self.anneal_gradient:
# for i,p in enumerate(features):
# features[i] = p * 0.1 + p.detach() * 0.9
return results, features
def decode_action(self, p, c_act_plane, per_features):
c_act = 0
c_act += self.sample_plane_feature(p, c_act_plane['xz'], plane='xz')
c_act += self.sample_plane_feature(p, c_act_plane['xy'], plane='xy')
c_act += self.sample_plane_feature(p, c_act_plane['yz'], plane='yz')
c_act = c_act.transpose(1, 2)
p = p.float()
net_act = self.fc_p_act(p)
for i in range(self.n_blocks):
net_act = net_act + self.fc_c_act[i](c_act)
if self.fuse:
net_act = self.blocks_act[i](
self.fc_c_merge[i](
torch.cat( ( net_act, per_features[i]), dim=-1)))
# (net_per.detach()*0.9+net_per * 0.1)), dim=-1)))
else:
net_act = self.blocks_act[i](net_act)
net_act = self.actvn(net_act)
if self.corr_dim != 0 and self.corr_head:
if self.fuse:
net_act = self.fc_act_corr_merge(
torch.cat((net_act, per_features[-1].detach()), dim=-1))
return {'flow':self.fc_flow(net_act)}
def forward(self, p, c_per_plane, c_act_plane):
results, per_features = self.decode_perception(p, c_per_plane)
results['flow'] = self.decode_action(p, c_act_plane, per_features)['flow']
return results
|
NVlabs/ACID/ACID/src/conv_onet/models/__init__.py | import torch
import numpy as np
import torch.nn as nn
from torch import distributions as dist
from src.conv_onet.models import decoder
from src.utils import plushsim_util
# Decoder dictionary
decoder_dict = {
'geom_decoder': decoder.GeomDecoder,
'combined_decoder': decoder.CombinedDecoder,
}
class ConvImpDyn(nn.Module):
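    ''' Action-conditional implicit dynamics network.
    Combines a perception encoder, an action encoder and an optional
    environment encoder with a decoder that predicts occupancy,
    correspondence features and per-point flow.
    Args:
        obj_per_encoder (nn.Module): object perception encoder network
        obj_act_encoder (nn.Module): object action encoder network
        env_encoder (nn.Module): environment encoder network (optional)
        decoder (nn.Module): decoder network
        device (device): torch device
        env_scale_factor (float): upsampling factor for environment features
    '''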
def __init__(self, obj_per_encoder, obj_act_encoder, env_encoder, decoder, device=None, env_scale_factor=2.):
super().__init__()
self.decoder = decoder.to(device)
self.obj_per_encoder = obj_per_encoder.to(device)
self.obj_act_encoder = obj_act_encoder.to(device)
if env_encoder is None:
self.env_encoder = env_encoder
else:
self.env_encoder = env_encoder.to(device)
self.env_upsample = torch.nn.UpsamplingBilinear2d(scale_factor=env_scale_factor)
self._device = device
def forward(self, inputs, sample=True, **kwargs):
''' Performs a forward pass through the network.
Args:
p (tensor): sampled points
inputs (tensor): conditioning input
sample (bool): whether to sample for z
'''
#############
c_per, c_act = self.encode_inputs(inputs)
return self.decode(inputs, c_per, c_act, **kwargs)
    def forward_perception(self, inputs, filter=True):
c_per, c_env = self.encode_perception(inputs, merge_env_feature=False)
for k in c_per.keys():
env_f = self.env_upsample(c_env[k])
c_env[k] = env_f
c_per[k] = torch.cat([c_per[k], env_f], dim=1)
# get curr observation state and features
p = inputs['sampled_pts']
if len(p.shape) > 3:
B,_,N,C = p.shape
curr_p = p.reshape([B*2,N,C])
else:
curr_p = p
curr_state, per_features = self.decoder.decode_perception(curr_p, c_per)
occ_pred = dist.Bernoulli(logits=curr_state['occ']).probs >= 0.5
curr_state['occ'] = occ_pred
if filter:
curr_p = curr_p[occ_pred]
if 'corr' in curr_state:
curr_state['corr'] = curr_state['corr'][occ_pred]
for i,p in enumerate(per_features):
per_features[i] = p[occ_pred]
return c_per, c_env, curr_p, curr_state, per_features
def rollout(self, pts, per_features, c_env, actions):
actions = actions.squeeze()
num_sequence = actions.shape[0]
num_actions = actions.shape[-2]
all_traj = []
total_time_act_render = 0
total_time_act_decode = 0
import time
curr_pts = [pts for _ in range(num_sequence)]
for j in range(num_actions):
act_traj = []
points_world = [p.cpu().numpy().squeeze()
* (1200, 1200, 400)
/ (1.1,1.1,1.1)
+ (0, 0, 180) for p in curr_pts]
for i in range(num_sequence):
g,t = actions[i,0,j], actions[i,1,j]
start_time = time.time()
c_act, act_partial = self.get_action_encoding(curr_pts[i], g, t, c_env)
total_time_act_render += time.time() - start_time
act_traj.append(act_partial)
start_time = time.time()
flow = self.decoder.decode_action(curr_pts[i], c_act, per_features)['flow']
curr_pts[i] = curr_pts[i] + flow / 10.
total_time_act_decode += time.time() - start_time
all_traj.append((curr_pts.copy(), act_traj))
print("total time render: ",total_time_act_render)
print("total time decode: ",total_time_act_decode)
return all_traj
def rollout_async(self, pts, per_features, c_env, actions):
actions = actions.squeeze()
num_sequence = actions.shape[0]
num_actions = actions.shape[-2]
all_traj = []
total_time_act_render = 0
total_time_act_decode = 0
total_async_time_act_render = 0
import time
from functools import partial
render_pts_func = partial(plushsim_util.render_points, return_index=True)
curr_pts = [pts for _ in range(num_sequence)]
for j in range(num_actions):
start_time = time.time()
points_world = [p.cpu().numpy().squeeze()
* (1200, 1200, 400)
/ (1.1,1.1,1.1)
+ (0, 0, 180) for p in curr_pts]
from multiprocessing import Pool
with Pool(16) as p:
vis_idxes = p.map(render_pts_func, points_world)
xyzs, acts = [],[]
for i in range(num_sequence):
g,t = actions[i,0,j], actions[i,1,j]
# c_act, act_partial = self.get_action_encoding(
# curr_pts[i], g, t, c_env, vis_idx=vis_idxes[i])
obj_xyz, obj_act = self.get_action_encoding_new(
curr_pts[i], g, t, c_env, vis_idx=vis_idxes[i])
xyzs.append(obj_xyz)
acts.append(obj_act)
total_time_act_render += time.time() - start_time
n = 20
start_time = time.time()
xyz_chunks = [xyzs[i:i+n] for i in range(0, num_sequence, n)]
act_chunks = [acts[i:i+n] for i in range(0, num_sequence, n)]
c_acts = []
for xyz, act in zip(xyz_chunks, act_chunks):
obj_xyz = torch.as_tensor(np.stack(xyz).astype(np.float32)).to(self._device)
obj_act = torch.as_tensor(np.stack(act).astype(np.float32)).to(self._device)
c_act_new = self.obj_act_encoder((obj_xyz, obj_act))
for chunk_i in range(len(xyz)):
c_act = {}
for k in c_act_new.keys():
c_act[k] = torch.cat([c_act_new[k][chunk_i].unsqueeze(0), c_env[k]], dim=1)
c_acts.append(c_act)
total_time_act_decode += time.time() - start_time
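            # Debug visualization: embed the first sequence's action feature
            # planes with t-SNE and save them as act_<plane>.png images.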
from src.utils import common_util
from PIL import Image
for k,v in c_acts[0].items():
v_np = v.squeeze().permute(1,2,0).cpu().numpy()
feature_plane = v_np.reshape([-1, v_np.shape[-1]])
tsne_result = common_util.embed_tsne(feature_plane)
colors = common_util.get_color_map(tsne_result)
colors = colors.reshape((128,128,-1)).astype(np.float32)
colors = (colors * 255 / np.max(colors)).astype('uint8')
img = Image.fromarray(colors)
img.save(f"act_{k}.png")
for i in range(num_sequence):
flow = self.decoder.decode_action(curr_pts[i], c_acts[i], per_features)['flow']
curr_pts[i] = curr_pts[i] + flow / 10.
all_traj.append(([p.cpu().numpy().squeeze() for p in curr_pts], xyzs))
return all_traj
def get_action_encoding_new(self, pts, grasp_loc, target_loc, c_env, vis_idx=None):
# pts: B*2, N, 3
import time
start_time = time.time()
B,N,_ = pts.shape
pts = pts.cpu().numpy()
xyzs, acts = [], []
        # get visible points by rendering pts
occ_pts = pts[0]
occ_pts_t = occ_pts * (1200, 1200, 400) / (1.1,1.1,1.1) + (0,0,180)
if vis_idx is None:
vis_idx = plushsim_util.render_points(occ_pts_t,
plushsim_util.CAM_EXTR,
plushsim_util.CAM_INTR,
return_index=True)
obj_xyz = occ_pts[vis_idx]
#print("time split 1: ", time.time() - start_time)
start_time = time.time()
# subsample pts
indices = np.random.randint(obj_xyz.shape[0], size=5000)
obj_xyz = obj_xyz[indices]
# make action feature
tiled_grasp_loc = np.tile(grasp_loc.cpu().numpy(), (len(obj_xyz), 1)).astype(np.float32)
tiled_target_loc = np.tile(target_loc.cpu().numpy(), (len(obj_xyz), 1)).astype(np.float32)
obj_act = np.concatenate([tiled_target_loc, obj_xyz - tiled_grasp_loc], axis=-1)
return obj_xyz, obj_act
def get_action_encoding(self, pts, grasp_loc, target_loc, c_env, vis_idx=None):
# pts: B*2, N, 3
import time
start_time = time.time()
B,N,_ = pts.shape
pts = pts.cpu().numpy()
xyzs, acts = [], []
        # get visible points by rendering pts
occ_pts = pts[0]
occ_pts_t = occ_pts * (1200, 1200, 400) / (1.1,1.1,1.1) + (0,0,180)
if vis_idx is None:
vis_idx = plushsim_util.render_points(occ_pts_t,
plushsim_util.CAM_EXTR,
plushsim_util.CAM_INTR,
return_index=True)
obj_xyz = occ_pts[vis_idx]
#print("time split 1: ", time.time() - start_time)
start_time = time.time()
# subsample pts
indices = np.random.randint(obj_xyz.shape[0], size=5000)
obj_xyz = obj_xyz[indices]
# make action feature
tiled_grasp_loc = np.tile(grasp_loc.cpu().numpy(), (len(obj_xyz), 1)).astype(np.float32)
tiled_target_loc = np.tile(target_loc.cpu().numpy(), (len(obj_xyz), 1)).astype(np.float32)
obj_act = np.concatenate([tiled_target_loc, obj_xyz - tiled_grasp_loc], axis=-1)
xyzs.append(obj_xyz)
acts.append(obj_act)
obj_xyz = torch.as_tensor(np.stack(xyzs).astype(np.float32)).to(self._device)
obj_act = torch.as_tensor(np.stack(acts).astype(np.float32)).to(self._device)
#print("time split 2: ", time.time() - start_time)
start_time = time.time()
c_act_new = self.obj_act_encoder((obj_xyz, obj_act))
#print("time split 3: ", time.time() - start_time)
start_time = time.time()
for k in c_act_new.keys():
c_act_new[k] = torch.cat([c_act_new[k], c_env[k]], dim=1)
#print("time split 4: ", time.time() - start_time)
start_time = time.time()
return c_act_new, obj_xyz
def encode_perception(self, inputs, merge_env_feature=True):
obj_pcloud = inputs['obj_obs']
if len(obj_pcloud.shape) > 3:
B,_,N,C = obj_pcloud.shape
obj_pcloud = obj_pcloud.reshape([B*2,N,C])
obj_xyz, obj_rgb = obj_pcloud[...,:3],obj_pcloud[...,3:6]
c_per = self.obj_per_encoder((obj_xyz, obj_rgb))
if self.env_encoder is not None:
            env_pcloud = inputs['env_obs'].to(self._device)
if len(env_pcloud.shape) > 3:
B,_,N,C = env_pcloud.shape
env_pcloud = env_pcloud.reshape([B*2,N,C])
env_xyz, env_rgb = env_pcloud[...,:3],env_pcloud[...,3:]
env_features = self.env_encoder((env_xyz, env_rgb))
if merge_env_feature:
for k in c_per.keys():
env_f = self.env_upsample(env_features[k])
c_per[k] = torch.cat([c_per[k], env_f], dim=1)
else:
return c_per, env_features
return c_per
def encode_inputs(self, inputs):
''' Encodes the input.
Args:
input (tensor): the input
'''
obj_pcloud = inputs['obj_obs']
B,_,N,C = obj_pcloud.shape
obj_pcloud = obj_pcloud.reshape([B*2,N,C])
obj_xyz, obj_rgb, obj_act = obj_pcloud[...,:3],obj_pcloud[...,3:6],obj_pcloud[...,6:]
c_per = self.obj_per_encoder((obj_xyz, obj_rgb))
c_act = self.obj_act_encoder((obj_xyz, obj_act))
if self.env_encoder is not None:
            env_pcloud = inputs['env_obs'].to(self._device)
B,_,N,C = env_pcloud.shape
env_pcloud = env_pcloud.reshape([B*2,N,C])
env_xyz, env_rgb = env_pcloud[...,:3],env_pcloud[...,3:]
env_features = self.env_encoder((env_xyz, env_rgb))
for k in c_per.keys():
env_f = self.env_upsample(env_features[k])
c_per[k] = torch.cat([c_per[k], env_f], dim=1)
c_act[k] = torch.cat([c_act[k], env_f], dim=1)
return c_per, c_act
def eval_points(self, pts, c):
outputs = self.decoder(pts, *c)
if 'occ' in outputs:
outputs['occ'] = dist.Bernoulli(logits=outputs['occ'])
return outputs
def decode(self, inputs, c1, c2, **kwargs):
''' Returns occupancy probabilities for the sampled points.
Args:
p (tensor): points
c (tensor): latent conditioned code c
'''
p = inputs['sampled_pts']
B,_,N,C = p.shape
p = p.reshape([B*2,N,C])
outputs = self.decoder(p, c1, c2)
if 'occ' in outputs:
outputs['occ'] = dist.Bernoulli(logits=outputs['occ'])
if 'corr' in outputs:
_,N,C = outputs['corr'].shape
corr_f = outputs['corr'].reshape([B,2,N,C])
if 'skip_indexing' not in kwargs:
corr_f = torch.transpose(corr_f, 0, 1)
corr_f = torch.flatten(corr_f, 1, 2)
inds = inputs['pair_indices']
corr_f = corr_f[:,inds]
outputs['corr'] = corr_f
return outputs
def to(self, device):
''' Puts the model to the device.
Args:
device (device): pytorch device
'''
model = super().to(device)
model._device = device
return model
class ConvOccGeom(nn.Module):
''' Occupancy Network class.
Args:
decoder (nn.Module): decoder network
encoder (nn.Module): encoder network
device (device): torch device
'''
def __init__(self, obj_encoder, env_encoder, decoder, device=None, env_scale_factor=2.):
super().__init__()
self.decoder = decoder.to(device)
self.obj_encoder = obj_encoder.to(device)
if env_encoder is None:
self.env_encoder = env_encoder
else:
self.env_encoder = env_encoder.to(device)
self.env_upsample = torch.nn.UpsamplingBilinear2d(scale_factor=env_scale_factor)
self._device = device
def forward(self, inputs, sample=True, **kwargs):
        ''' Performs a forward pass through the network.
        Args:
            inputs (dict): input dictionary with observations and sampled points
            sample (bool): whether to sample for z
        '''
c = self.encode_inputs(inputs)
return self.decode(inputs, c, **kwargs)
def encode_inputs(self, inputs):
        ''' Encodes the input.
        Args:
            inputs (dict): input dictionary with 'obj_obs' and, when an
                environment encoder is present, 'env_obs'
        '''
obj_pcloud = inputs['obj_obs']
B,_,N,C = obj_pcloud.shape
obj_pcloud = obj_pcloud.reshape([B*2,N,C])
obj_xyz, obj_rgb = obj_pcloud[...,:3],obj_pcloud[...,3:]
obj_features = self.obj_encoder((obj_xyz, obj_rgb))
if self.env_encoder is None:
return obj_features
env_pcloud = inputs['env_obs'].cuda()
B,_,N,C = env_pcloud.shape
env_pcloud = env_pcloud.reshape([B*2,N,C])
env_xyz, env_rgb = env_pcloud[...,:3],env_pcloud[...,3:]
env_features = self.env_encoder((env_xyz, env_rgb))
joint_features = {}
for k in obj_features.keys():
env_f = self.env_upsample(env_features[k])
joint_features[k] = torch.cat([obj_features[k], env_f], dim=1)
return joint_features
def eval_points(self, pts, c):
outputs = self.decoder(pts, c)
if 'occ' in outputs:
outputs['occ'] = dist.Bernoulli(logits=outputs['occ'])
return outputs
def decode(self, inputs, c, **kwargs):
        ''' Returns occupancy probabilities for the sampled points.
        Args:
            inputs (dict): input dictionary with 'sampled_pts' (and
                'pair_indices' when correspondences are predicted)
            c (tensor): latent conditioned code c
        '''
p = inputs['sampled_pts']
B,_,N,C = p.shape
p = p.reshape([B*2,N,C])
outputs = self.decoder(p, c, **kwargs)
if 'occ' in outputs:
outputs['occ'] = dist.Bernoulli(logits=outputs['occ'])
if 'corr' in outputs:
_,N,C = outputs['corr'].shape
corr_f = outputs['corr'].reshape([B,2,N,C])
corr_f = torch.transpose(corr_f, 0, 1)
corr_f = torch.flatten(corr_f, 1, 2)
inds = inputs['pair_indices']
corr_f = corr_f[:,inds]
outputs['corr'] = corr_f
return outputs
def to(self, device):
        ''' Moves the model to the given device.
        Args:
            device (device): pytorch device
        '''
model = super().to(device)
model._device = device
return model
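# Usage sketch with stub modules (assumptions for illustration only): ConvOccGeom
# expects an encoder mapping (xyz, rgb) to a dict of plane features and a
# decoder mapping (points, features) to a dict with 'occ' logits.
class _StubEncoder(nn.Module):
    def forward(self, inp):
        xyz, rgb = inp
        return {'xz': torch.zeros(xyz.shape[0], 8, 16, 16)}
class _StubDecoder(nn.Module):
    def forward(self, p, c, **kwargs):
        return {'occ': torch.zeros(p.shape[0], p.shape[1])}
def _example_conv_occ_geom():
    model = ConvOccGeom(_StubEncoder(), None, _StubDecoder())
    inputs = {'obj_obs': torch.rand(2, 2, 256, 6),      # B x 2 x N x (xyz+rgb)
              'sampled_pts': torch.rand(2, 2, 128, 3)}  # B x 2 x N x 3
    out = model(inputs)
    return out['occ']  # a torch.distributions.Bernoulli over occupancy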
|
NVlabs/ACID/ACID/src/encoder/__init__.py | from src.encoder import (
pointnet
)
encoder_dict = {
'geom_encoder': pointnet.GeomEncoder,
}
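# Usage sketch: build an encoder through the registry. The keyword values
# below are illustrative assumptions, not the repo's actual configs.
def _example_build_encoder():
    return encoder_dict['geom_encoder'](
        c_dim=32, dim=3, f_dim=3, hidden_dim=32, plane_resolution=64,
        unet_kwargs={'depth': 4, 'start_filts': 32, 'merge_mode': 'concat'})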
|
NVlabs/ACID/ACID/src/encoder/pointnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from src.layers import ResnetBlockFC
from torch_scatter import scatter_mean, scatter_max
from src.common import coordinate2index, normalize_coordinate
from src.encoder.unet import UNet
class GeomEncoder(nn.Module):
    ''' PointNet-based encoder network with ResNet blocks for each point.
    The number of input points is fixed.
    Args:
        c_dim (int): dimension of latent code c
        dim (int): input points dimension
        f_dim (int): dimension of the per-point features appended to the points
        hidden_dim (int): hidden dimension of the network
        scatter_type (str): feature aggregation when doing local pooling ('max' or 'mean')
        unet_kwargs (dict): U-Net parameters
        plane_resolution (int): defined resolution for plane features
        padding (float): conventional padding parameter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
        n_blocks (int): number of ResnetBlockFC blocks
    '''
def __init__(self, c_dim=128, dim=3, f_dim=9, hidden_dim=128, scatter_type='max',
unet_kwargs=None, plane_resolution=None, padding=0.1, n_blocks=5):
super().__init__()
self.c_dim = c_dim
self.fc_pos = nn.Linear(dim+f_dim, 2*hidden_dim)
self.blocks = nn.ModuleList([
ResnetBlockFC(2*hidden_dim, hidden_dim) for i in range(n_blocks)
])
self.fc_c = nn.Linear(hidden_dim, c_dim)
self.actvn = nn.ReLU()
self.hidden_dim = hidden_dim
self.unet = UNet(c_dim, in_channels=c_dim, **unet_kwargs)
self.reso_plane = plane_resolution
self.padding = padding
if scatter_type == 'max':
self.scatter = scatter_max
elif scatter_type == 'mean':
self.scatter = scatter_mean
else:
raise ValueError('incorrect scatter type')
def generate_plane_features(self, p, c, plane='xz'):
# acquire indices of features in plane
xy = normalize_coordinate(p.clone(), plane=plane, padding=self.padding) # normalize to the range of (0, 1)
index = coordinate2index(xy, self.reso_plane)
# scatter plane features from points
fea_plane = c.new_zeros(p.size(0), self.c_dim, self.reso_plane**2)
c = c.permute(0, 2, 1) # B x 512 x T
fea_plane = scatter_mean(c, index, out=fea_plane) # B x 512 x reso^2
        fea_plane = fea_plane.reshape(p.size(0), self.c_dim, self.reso_plane, self.reso_plane) # sparse matrix (B x 512 x reso x reso)
# process the plane features with UNet
fea_plane = self.unet(fea_plane)
return fea_plane
def pool_local(self, xy, index, c):
bs, fea_dim = c.size(0), c.size(2)
keys = xy.keys()
c_out = 0
for key in keys:
# scatter plane features from points
fea = self.scatter(c.permute(0, 2, 1), index[key], dim_size=self.reso_plane**2)
if self.scatter == scatter_max:
fea = fea[0]
# gather feature back to points
fea = fea.gather(dim=2, index=index[key].expand(-1, fea_dim, -1))
c_out += fea
return c_out.permute(0, 2, 1)
def forward(self, p):
if type(p) is tuple:
p, pf = p
else:
pf = None
# acquire the index for each point
coord = {}
index = {}
coord['xz'] = normalize_coordinate(p.clone(), plane='xz', padding=self.padding)
index['xz'] = coordinate2index(coord['xz'], self.reso_plane)
coord['xy'] = normalize_coordinate(p.clone(), plane='xy', padding=self.padding)
index['xy'] = coordinate2index(coord['xy'], self.reso_plane)
coord['yz'] = normalize_coordinate(p.clone(), plane='yz', padding=self.padding)
index['yz'] = coordinate2index(coord['yz'], self.reso_plane)
net = self.fc_pos(torch.cat([p, pf],dim=-1))
net = self.blocks[0](net)
for block in self.blocks[1:]:
pooled = self.pool_local(coord, index, net)
net = torch.cat([net, pooled], dim=2)
net = block(net)
c = self.fc_c(net)
fea = {}
fea['xz'] = self.generate_plane_features(p, c, plane='xz')
fea['xy'] = self.generate_plane_features(p, c, plane='xy')
fea['yz'] = self.generate_plane_features(p, c, plane='yz')
return fea
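# Usage sketch: encode a batch of point clouds with per-point features into
# tri-plane ('xz', 'xy', 'yz') features. Sizes are illustrative; f_dim must
# match the width of the per-point features.
def _example_geom_encoder():
    enc = GeomEncoder(c_dim=32, f_dim=3, hidden_dim=32, plane_resolution=64,
                      unet_kwargs={'depth': 4, 'start_filts': 32, 'merge_mode': 'concat'})
    pts = torch.rand(2, 1024, 3) - 0.5    # B x N x 3, roughly in [-0.5, 0.5]
    feat = torch.rand(2, 1024, 3)         # B x N x f_dim
    planes = enc((pts, feat))
    return {k: v.shape for k, v in planes.items()}  # each B x c_dim x reso x reso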
|
NVlabs/ACID/ACID/src/encoder/unet.py | '''
Code is from:
https://github.com/jaxony/unet-pytorch/blob/master/model.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from torch.nn import init
import numpy as np
def conv3x3(in_channels, out_channels, stride=1,
padding=1, bias=True, groups=1):
return nn.Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=stride,
padding=padding,
bias=bias,
groups=groups)
def upconv2x2(in_channels, out_channels, mode='transpose'):
if mode == 'transpose':
return nn.ConvTranspose2d(
in_channels,
out_channels,
kernel_size=2,
stride=2)
else:
        # nn.Upsample keeps the channel count, so a 1x1 conv
        # is needed to map in_channels to out_channels
return nn.Sequential(
nn.Upsample(mode='bilinear', scale_factor=2),
conv1x1(in_channels, out_channels))
def conv1x1(in_channels, out_channels, groups=1):
return nn.Conv2d(
in_channels,
out_channels,
kernel_size=1,
groups=groups,
stride=1)
class DownConv(nn.Module):
"""
A helper Module that performs 2 convolutions and 1 MaxPool.
A ReLU activation follows each convolution.
"""
def __init__(self, in_channels, out_channels, pooling=True):
super(DownConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.pooling = pooling
self.conv1 = conv3x3(self.in_channels, self.out_channels)
self.conv2 = conv3x3(self.out_channels, self.out_channels)
if self.pooling:
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
before_pool = x
if self.pooling:
x = self.pool(x)
return x, before_pool
class UpConv(nn.Module):
"""
A helper Module that performs 2 convolutions and 1 UpConvolution.
A ReLU activation follows each convolution.
"""
def __init__(self, in_channels, out_channels,
merge_mode='concat', up_mode='transpose'):
super(UpConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.merge_mode = merge_mode
self.up_mode = up_mode
self.upconv = upconv2x2(self.in_channels, self.out_channels,
mode=self.up_mode)
if self.merge_mode == 'concat':
self.conv1 = conv3x3(
2*self.out_channels, self.out_channels)
else:
# num of input channels to conv2 is same
self.conv1 = conv3x3(self.out_channels, self.out_channels)
self.conv2 = conv3x3(self.out_channels, self.out_channels)
def forward(self, from_down, from_up):
""" Forward pass
Arguments:
from_down: tensor from the encoder pathway
from_up: upconv'd tensor from the decoder pathway
"""
from_up = self.upconv(from_up)
if self.merge_mode == 'concat':
x = torch.cat((from_up, from_down), 1)
else:
x = from_up + from_down
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
return x
class UNet(nn.Module):
""" `UNet` class is based on https://arxiv.org/abs/1505.04597
The U-Net is a convolutional encoder-decoder neural network.
Contextual spatial information (from the decoding,
expansive pathway) about an input tensor is merged with
information representing the localization of details
(from the encoding, compressive pathway).
Modifications to the original paper:
(1) padding is used in 3x3 convolutions to prevent loss
of border pixels
(2) merging outputs does not require cropping due to (1)
(3) residual connections can be used by specifying
UNet(merge_mode='add')
    (4) if non-parametric upsampling is used in the decoder
        pathway (specified by up_mode='upsample'), then an
        additional 1x1 2d convolution occurs after upsampling
        to reduce channel dimensionality by a factor of 2.
        With up_mode='transpose', this channel halving happens
        inside the transpose convolution itself.
    """
def __init__(self, num_classes, in_channels=3, depth=5,
start_filts=64, up_mode='transpose',
merge_mode='concat', **kwargs):
"""
Arguments:
in_channels: int, number of channels in the input tensor.
Default is 3 for RGB images.
depth: int, number of MaxPools in the U-Net.
start_filts: int, number of convolutional filters for the
first conv.
            up_mode: string, type of upconvolution. Choices: 'transpose'
                for transpose convolution or 'upsample' for bilinear
                upsampling (see upconv2x2).
"""
super(UNet, self).__init__()
if up_mode in ('transpose', 'upsample'):
self.up_mode = up_mode
else:
raise ValueError("\"{}\" is not a valid mode for "
"upsampling. Only \"transpose\" and "
"\"upsample\" are allowed.".format(up_mode))
if merge_mode in ('concat', 'add'):
self.merge_mode = merge_mode
else:
raise ValueError("\"{}\" is not a valid mode for"
"merging up and down paths. "
"Only \"concat\" and "
"\"add\" are allowed.".format(up_mode))
# NOTE: up_mode 'upsample' is incompatible with merge_mode 'add'
if self.up_mode == 'upsample' and self.merge_mode == 'add':
raise ValueError("up_mode \"upsample\" is incompatible "
"with merge_mode \"add\" at the moment "
"because it doesn't make sense to use "
"nearest neighbour to reduce "
"depth channels (by half).")
self.num_classes = num_classes
self.in_channels = in_channels
self.start_filts = start_filts
self.depth = depth
self.down_convs = []
self.up_convs = []
# create the encoder pathway and add to a list
for i in range(depth):
ins = self.in_channels if i == 0 else outs
outs = self.start_filts*(2**i)
            pooling = i < depth - 1
down_conv = DownConv(ins, outs, pooling=pooling)
self.down_convs.append(down_conv)
# create the decoder pathway and add to a list
# - careful! decoding only requires depth-1 blocks
for i in range(depth-1):
ins = outs
outs = ins // 2
up_conv = UpConv(ins, outs, up_mode=up_mode,
merge_mode=merge_mode)
self.up_convs.append(up_conv)
# add the list of modules to current module
self.down_convs = nn.ModuleList(self.down_convs)
self.up_convs = nn.ModuleList(self.up_convs)
self.conv_final = conv1x1(outs, self.num_classes)
self.reset_params()
@staticmethod
def weight_init(m):
if isinstance(m, nn.Conv2d):
init.xavier_normal_(m.weight)
init.constant_(m.bias, 0)
def reset_params(self):
for i, m in enumerate(self.modules()):
self.weight_init(m)
def forward(self, x):
encoder_outs = []
# encoder pathway, save outputs for merging
for i, module in enumerate(self.down_convs):
x, before_pool = module(x)
encoder_outs.append(before_pool)
for i, module in enumerate(self.up_convs):
before_pool = encoder_outs[-(i+2)]
x = module(before_pool, x)
        # No softmax is used, so you need to use
        # nn.CrossEntropyLoss in your training script,
        # as that loss includes the softmax already.
x = self.conv_final(x)
return x
if __name__ == "__main__":
"""
testing
"""
model = UNet(1, depth=5, merge_mode='concat', in_channels=1, start_filts=32)
print(model)
print(sum(p.numel() for p in model.parameters()))
reso = 176
x = np.zeros((1, 1, reso, reso))
x[:,:,int(reso/2-1), int(reso/2-1)] = np.nan
x = torch.FloatTensor(x)
out = model(x)
print('%f'%(torch.sum(torch.isnan(out)).detach().cpu().numpy()/(reso*reso)))
# loss = torch.sum(out)
# loss.backward()
|
NVlabs/ACID/ACID/src/utils/common_util.py | import os
import glob
import json
import scipy
import itertools
import numpy as np
from PIL import Image
from scipy.spatial.transform import Rotation
from sklearn.neighbors import NearestNeighbors
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
def get_color_map(x):
colours = plt.cm.Spectral(x)
return colours[:, :3]
def embed_tsne(data):
"""
N x D np.array data
"""
tsne = TSNE(n_components=1, verbose=0, perplexity=40, n_iter=300, random_state=0)
tsne_results = tsne.fit_transform(data)
tsne_results = np.squeeze(tsne_results)
tsne_min = np.min(tsne_results)
tsne_max = np.max(tsne_results)
return (tsne_results - tsne_min) / (tsne_max - tsne_min)
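# Usage sketch: turn high-dimensional per-point features into colors.
# The feature array below is synthetic.
def _example_feature_colors():
    feats = np.random.rand(200, 16)   # N x D features
    t = embed_tsne(feats)             # N values normalized to [0, 1]
    return get_color_map(t)           # N x 3 RGB colors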
########################################################################
# Viewpoint transform
########################################################################
view_to_order = {
'cam0': ('X', 'Y', 'Z'),
'cam1': ('-Z', 'Y', 'X'),
'cam2': ('Z', 'Y', '-X'),
'cam3': ('-X', 'Y', '-Z'),
}
def get_axis_pt(val, x, y, z):
multiplier = -1 if '-' in val else 1
if "X" in val:
return x * multiplier
elif "Y" in val:
return y * multiplier
elif "Z" in val:
return z * multiplier
def world_coord_view_augmentation(view, pts):
order = view_to_order[view]
pts = pts.reshape([-1,3])
x,y,z = np.moveaxis(pts, 1, 0)
return np.array([get_axis_pt(o,x,y,z) for o in order]).T
########################################################################
# partial observation projection / transform / rendering utilities
########################################################################
def transform_points_cam_to_world(cam_pts, camera_pose):
world_pts = np.transpose(
np.dot(camera_pose[0:3, 0:3], np.transpose(cam_pts)) + np.tile(camera_pose[0:3, 3:], (1, cam_pts.shape[0])))
return world_pts
def transform_points_world_to_cam(world_points, cam_extr):
return np.transpose(
np.dot(
np.linalg.inv(
cam_extr[0:3, 0:3]),
np.transpose(world_points)
- np.tile(cam_extr[0:3, 3:], (1, world_points.shape[0]))))
def render_points_slowest(world_points, cam_extr, cam_intr):
cam_points = transform_points_world_to_cam(world_points, cam_extr)
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
cam_pts_x = np.rint(cam_pts_x).astype(int)
cam_pts_y = np.rint(cam_pts_y).astype(int)
points = np.stack([cam_pts_y, cam_pts_x, cam_pts_z, np.arange(len(cam_pts_x))]).T
sorted_pts = sorted(points, key=lambda x: (x[0], x[1]))
grouped_pts = [[*j] for i, j in itertools.groupby(
sorted_pts,
key=lambda x: (x[0] // 3, x[1] // 3))]
min_depth = np.array([sorted(p, key=lambda x: -x[2])[0] for p in grouped_pts])
min_idx = min_depth[:,-1]
min_depth = min_depth[:,:-1]
return world_points[min_idx.astype(int)]
def render_points_slow(world_points, cam_extr, cam_intr):
cam_points = transform_points_world_to_cam(world_points, cam_extr)
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
points = np.stack([cam_pts_y, cam_pts_x, cam_pts_z, np.arange(len(cam_pts_x))]).T
points[:,:2] = np.rint(points[:,:2] / 2)
points = points[points[:,1].argsort()]
points = points[points[:,0].argsort(kind='mergesort')]
grouped_pts = np.split(points[:,2:], np.unique(points[:, :2], axis=0, return_index=True)[1][1:])
min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
min_idx = min_depth[:,-1].astype(int)
return world_points[min_idx]
def render_points(world_points, cam_extr, cam_intr, return_index=False):
cam_points = transform_points_world_to_cam(world_points, cam_extr)
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
idx = np.rint(cam_pts_y / 2) * 1000 + np.rint(cam_pts_x / 2)
val = np.stack([cam_pts_z, np.arange(len(cam_pts_x))]).T
order = idx.argsort()
idx = idx[order]
val = val[order]
grouped_pts = np.split(val, np.unique(idx, return_index=True)[1][1:])
min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
min_idx = min_depth[:,-1].astype(int)
if return_index:
return min_idx
return world_points[min_idx]
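# Usage sketch with a synthetic camera: identity extrinsics and a made-up
# pinhole intrinsic matrix. Points are placed at negative z to match the
# negative-depth convention used in the projection code above.
def _example_render_points():
    world_points = np.random.rand(5000, 3) * 10.
    world_points[:, 2] -= 20.         # keep all points in front of the camera
    cam_extr = np.eye(4)
    cam_intr = np.array([[500., 0., 320.], [0., 500., 240.], [0., 0., 1.]])
    return render_points(world_points, cam_extr, cam_intr)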
def project_depth_world_space(depth_image, camera_intr, camera_pose, keep_dim=False, project_factor=1.):
cam_pts = project_depth_cam_space(depth_image, camera_intr, keep_dim=False,project_factor=project_factor)
world_pts = transform_points_cam_to_world(cam_pts, camera_pose)
    im_h, im_w = depth_image.shape
    if keep_dim:
        world_pts = world_pts.reshape([im_h, im_w, 3])
return world_pts
def project_depth_cam_space(depth_img, camera_intrinsics, keep_dim=True, project_factor=1.):
# Get depth image size
im_h = depth_img.shape[0]
im_w = depth_img.shape[1]
# Project depth into 3D point cloud in camera coordinates
pix_x, pix_y = np.meshgrid(np.linspace(0, im_w - 1, im_w), np.linspace(0, im_h - 1, im_h))
cam_pts_x = np.multiply(pix_x - im_w / 2., -depth_img / camera_intrinsics[0, 0])
cam_pts_y = np.multiply(pix_y - im_h / 2., depth_img / camera_intrinsics[1, 1])
cam_pts_z = depth_img.copy()
cam_pts_x.shape = (im_h * im_w, 1)
cam_pts_y.shape = (im_h * im_w, 1)
cam_pts_z.shape = (im_h * im_w, 1)
cam_pts = np.concatenate((cam_pts_x, cam_pts_y, cam_pts_z), axis=1) * project_factor
if keep_dim:
cam_pts = cam_pts.reshape([im_h, im_w, 3])
return cam_pts
def get_trunc_ab(mean, std, a, b):
    return (a - mean) / std, (b - mean) / std
def get_trunc_ab_range(mean_min, mean_max, std, a, b):
    return (a - mean_min) / std, (b - mean_max) / std
def transform_points(pointcloud, from_range, to_range):
if len(pointcloud.shape) == 1:
pointcloud = pointcloud.reshape([1,-1])
if pointcloud.shape[1] == 6:
xyz = pointcloud[:,:3]
rgb = pointcloud[:,3:]
else:
xyz = pointcloud
rgb = None
from_center = np.mean(from_range, axis=0)
from_size = np.ptp(from_range, axis=0)
to_center = np.mean(to_range, axis=0)
to_size = np.ptp(to_range, axis=0)
xyz = (xyz - from_center) / from_size * to_size + to_center
if rgb is None:
return xyz
else:
return np.concatenate([xyz, rgb], axis=-1)
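# Usage sketch: remap a point cloud from a scene-scale range into the unit
# cube. The scene range below mirrors the SCENE_RANGE constants used in the
# *_util modules, but is only an example here.
def _example_transform_points():
    scene_range = np.array([[-600., -400., 0.], [600., 400., 400.]])
    unit_range = np.array([[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]])
    pts = np.random.uniform(scene_range[0], scene_range[1], size=(1000, 3))
    return transform_points(pts, scene_range, unit_range)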
def extent_to_cube(extent):
min_x,min_y,min_z = extent[0]
max_x,max_y,max_z = extent[1]
verts = np.array([
(max_x,max_y,max_z),
(max_x,max_y,min_z),
(max_x,min_y,max_z),
(max_x,min_y,min_z),
(min_x,max_y,max_z),
(min_x,max_y,min_z),
(min_x,min_y,max_z),
(min_x,min_y,min_z),])
faces = np.array([
(1,5,7,3),
(4,3,7,8),
(8,7,5,6),
(6,2,4,8),
(2,1,3,4),
(6,5,1,2),])
return verts, faces
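# Usage sketch: write a bounding box as a Wavefront OBJ with quad faces
# (the faces above are 1-indexed, as OBJ requires). The output path is
# illustrative; write_pointcoud_as_obj is defined further below in this file.
def _example_extent_to_obj(path='/tmp/bbox.obj'):
    verts, faces = extent_to_cube(np.array([[-1., -1., -1.], [1., 1., 1.]]))
    write_pointcoud_as_obj(path, verts, faces)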
########################################################################
# Visualization
########################################################################
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
def set_axes_equal(ax):
    '''Make axes of a 3D plot have equal scale so that spheres appear as spheres,
    cubes as cubes, etc. This is one possible solution to Matplotlib's
    ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
'''
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5*max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
def set_background_blank(ax):
# Hide grid lines
ax.grid(False)
ax.set_axis_off()
# Hide axes ticks
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
# First remove fill
ax.xaxis.pane.fill = False
ax.yaxis.pane.fill = False
ax.zaxis.pane.fill = False
# Now set color to white (or whatever is "invisible")
ax.xaxis.pane.set_edgecolor((1.0, 1.0, 1.0, 0.0))
ax.yaxis.pane.set_edgecolor((1.0, 1.0, 1.0, 0.0))
ax.zaxis.pane.set_edgecolor((1.0, 1.0, 1.0, 0.0))
def side_by_side_point_clouds(point_clouds, angle=(90,0)):
fig = plt.figure()
W = int(len(point_clouds) ** 0.5)
H = math.ceil(len(point_clouds) / W)
for i, pcloud in enumerate(point_clouds):
action = None
flow = None
pts = pcloud['pts']
title = pcloud['title']
col = pcloud.get('col', None)
flow = pcloud.get('flow', None)
action = pcloud.get('action', None)
ax = fig.add_subplot(W, H, i+1,projection='3d')
ax.set_title(title)
if flow is not None:
flow_norm = np.linalg.norm(flow, axis=1)
viz_idx = flow_norm > 0.0
flow = flow[viz_idx]
ax.quiver(
pts[:,0][viz_idx],
pts[:,1][viz_idx],
pts[:,2][viz_idx],
flow[:,0], flow[:,1], flow[:,2],
color = 'red', linewidth=3, alpha=0.2
)
if col is None:
col = 'blue'
ax.scatter(pts[:,0],
pts[:,1],
pts[:,2], color=col,s=0.5)
ax.view_init(*angle)
if action is not None:
ax.scatter(action[0], action[1], 0.,
edgecolors='tomato', color='turquoise', marker='*',s=80)
set_axes_equal(ax)
set_background_blank(ax)
fig.tight_layout()
return fig
def write_pointcoud_as_obj(path, xyzrgb, faces=None):
with open(path, 'w') as fp:
if xyzrgb.shape[1] == 6:
for x,y,z,r,g,b in xyzrgb:
fp.write(f"v {x:.3f} {y:.3f} {z:.3f} {r:.3f} {g:.3f} {b:.3f}\n")
else:
for x,y,z in xyzrgb:
fp.write(f"v {x:.3f} {y:.3f} {z:.3f}\n")
if faces is not None:
for f in faces:
f_str = " ".join([str(i) for i in f])
fp.write(f"f {f_str}\n")
#################################
# Distance Metric
#################################
def subsample_points(points, resolution=0.0125, return_index=True):
if points.shape[1] == 6:
xyz = points[:,:3]
else:
xyz = points
if points.shape[0] == 0:
if return_index:
return np.arange(0)
return points
    idx = np.unique(xyz // resolution * resolution, axis=0, return_index=True)[1]
if return_index:
return idx
return points[idx]
from sklearn.neighbors import NearestNeighbors
def chamfer_distance(x, y, metric='l2', direction='bi'):
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
min_y_to_x = x_nn.kneighbors(y)[0]
y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
min_x_to_y = y_nn.kneighbors(x)[0]
return np.mean(min_y_to_x) + np.mean(min_x_to_y)
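# Usage sketch: evaluate the metrics on two random clouds (f1_score is
# defined just below and resolved at call time). Numbers are illustrative.
def _example_distance_metrics():
    x = np.random.rand(500, 3)        # e.g. a prediction
    y = np.random.rand(500, 3)        # e.g. the ground truth
    cd = chamfer_distance(x, y)
    fscore, precision, recall = f1_score(x, y, th=0.05)
    return cd, fscore, precision, recall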
def f1_score(x, y, metric='l2', th=0.01):
# x is pred
# y is gt
if x.shape[0] == 0:
return 0,0,0
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
d2 = x_nn.kneighbors(y)[0]
y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
d1 = y_nn.kneighbors(x)[0]
recall = float(sum(d < th for d in d2)) / float(len(d2))
precision = float(sum(d < th for d in d1)) / float(len(d1))
if recall+precision > 0:
fscore = 2 * recall * precision / (recall + precision)
else:
fscore = 0
return fscore, precision, recall |
NVlabs/ACID/ACID/src/utils/io.py | import os
from plyfile import PlyElement, PlyData
import numpy as np
def export_pointcloud(vertices, out_file, as_text=True):
assert(vertices.shape[1] == 3)
vertices = vertices.astype(np.float32)
vertices = np.ascontiguousarray(vertices)
vector_dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]
vertices = vertices.view(dtype=vector_dtype).flatten()
plyel = PlyElement.describe(vertices, 'vertex')
plydata = PlyData([plyel], text=as_text)
plydata.write(out_file)
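# Usage sketch: round-trip a random cloud through PLY (the path is
# illustrative; load_pointcloud is defined just below).
def _example_ply_roundtrip(path='/tmp/cloud.ply'):
    pts = np.random.rand(100, 3).astype(np.float32)
    export_pointcloud(pts, path)
    back = load_pointcloud(path)
    assert back.shape == pts.shape
    return back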
def load_pointcloud(in_file):
plydata = PlyData.read(in_file)
vertices = np.stack([
plydata['vertex']['x'],
plydata['vertex']['y'],
plydata['vertex']['z']
], axis=1)
return vertices
def read_off(file):
"""
Reads vertices and faces from an off file.
:param file: path to file to read
:type file: str
:return: vertices and faces as lists of tuples
:rtype: [(float)], [(int)]
"""
assert os.path.exists(file), 'file %s not found' % file
with open(file, 'r') as fp:
lines = fp.readlines()
lines = [line.strip() for line in lines]
        # Fix for a ModelNet bug where 'OFF' and the number of vertices and
        # faces are all on the first line.
if len(lines[0]) > 3:
assert lines[0][:3] == 'OFF' or lines[0][:3] == 'off', \
'invalid OFF file %s' % file
parts = lines[0][3:].split(' ')
assert len(parts) == 3
num_vertices = int(parts[0])
assert num_vertices > 0
num_faces = int(parts[1])
assert num_faces > 0
start_index = 1
# This is the regular case!
else:
assert lines[0] == 'OFF' or lines[0] == 'off', \
'invalid OFF file %s' % file
parts = lines[1].split(' ')
assert len(parts) == 3
num_vertices = int(parts[0])
assert num_vertices > 0
num_faces = int(parts[1])
assert num_faces > 0
start_index = 2
vertices = []
for i in range(num_vertices):
vertex = lines[start_index + i].split(' ')
vertex = [float(point.strip()) for point in vertex if point != '']
assert len(vertex) == 3
vertices.append(vertex)
faces = []
for i in range(num_faces):
face = lines[start_index + num_vertices + i].split(' ')
face = [index.strip() for index in face if index != '']
# check to be sure
for index in face:
assert index != '', \
'found empty vertex index: %s (%s)' \
% (lines[start_index + num_vertices + i], file)
face = [int(index) for index in face]
            assert face[0] == len(face) - 1, \
                'face should have %d vertices but has %d (%s)' \
                % (face[0], len(face) - 1, file)
assert face[0] == 3, \
'only triangular meshes supported (%s)' % file
for index in face:
assert index >= 0 and index < num_vertices, \
'vertex %d (of %d vertices) does not exist (%s)' \
% (index, num_vertices, file)
assert len(face) > 1
faces.append(face)
return vertices, faces
assert False, 'could not open %s' % file
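# Usage sketch: write a minimal one-triangle OFF file and read it back.
def _example_read_off():
    import tempfile
    off_text = "OFF\n3 1 0\n0 0 0\n1 0 0\n0 1 0\n3 0 1 2\n"
    with tempfile.NamedTemporaryFile('w', suffix='.off', delete=False) as fp:
        fp.write(off_text)
        path = fp.name
    vertices, faces = read_off(path)
    os.remove(path)
    return vertices, faces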
|
NVlabs/ACID/ACID/src/utils/visualize.py | import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import src.common as common
def visualize_data(data, data_type, out_file):
r''' Visualizes the data with regard to its type.
Args:
data (tensor): batch of data
data_type (string): data type (img, voxels or pointcloud)
out_file (string): output file
'''
if data_type == 'voxels':
visualize_voxels(data, out_file=out_file)
elif data_type == 'pointcloud':
visualize_pointcloud(data, out_file=out_file)
elif data_type is None or data_type == 'idx':
pass
else:
raise ValueError('Invalid data_type "%s"' % data_type)
def visualize_voxels(voxels, out_file=None, show=False):
r''' Visualizes voxel data.
Args:
voxels (tensor): voxel data
out_file (string): output file
show (bool): whether the plot should be shown
'''
# Use numpy
voxels = np.asarray(voxels)
# Create plot
fig = plt.figure()
ax = fig.gca(projection=Axes3D.name)
voxels = voxels.transpose(2, 0, 1)
ax.voxels(voxels, edgecolor='k')
ax.set_xlabel('Z')
ax.set_ylabel('X')
ax.set_zlabel('Y')
ax.view_init(elev=30, azim=45)
if out_file is not None:
plt.savefig(out_file)
if show:
plt.show()
plt.close(fig)
def visualize_pointcloud(points, normals=None,
out_file=None, show=False):
r''' Visualizes point cloud data.
Args:
points (tensor): point data
normals (tensor): normal data (if existing)
out_file (string): output file
show (bool): whether the plot should be shown
'''
# Use numpy
points = np.asarray(points)
# Create plot
fig = plt.figure()
ax = fig.gca(projection=Axes3D.name)
ax.scatter(points[:, 2], points[:, 0], points[:, 1])
if normals is not None:
ax.quiver(
points[:, 2], points[:, 0], points[:, 1],
normals[:, 2], normals[:, 0], normals[:, 1],
length=0.1, color='k'
)
ax.set_xlabel('Z')
ax.set_ylabel('X')
ax.set_zlabel('Y')
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.set_zlim(-0.5, 0.5)
ax.view_init(elev=30, azim=45)
if out_file is not None:
plt.savefig(out_file)
if show:
plt.show()
plt.close(fig)
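# Usage sketch: save a figure for a random point cloud (output path is
# illustrative).
def _example_visualize(out_file='/tmp/pcloud.png'):
    points = np.random.rand(500, 3) - 0.5
    visualize_pointcloud(points, out_file=out_file)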
|
NVlabs/ACID/ACID/src/utils/__init__.py | |
NVlabs/ACID/ACID/src/utils/mentalsim_util.py | import os
import glob
import json
import scipy
import itertools
import numpy as np
from PIL import Image
from scipy.spatial.transform import Rotation
from sklearn.neighbors import NearestNeighbors
########################################################################
# Viewpoint transform
########################################################################
view_to_order = {
'cam0': ('X', 'Y', 'Z'),
'cam1': ('-Z', 'Y', 'X'),
'cam2': ('Z', 'Y', '-X'),
'cam3': ('-X', 'Y', '-Z'),
}
def get_axis_pt(val, x, y, z):
multiplier = -1 if '-' in val else 1
if "X" in val:
return x * multiplier
elif "Y" in val:
return y * multiplier
elif "Z" in val:
return z * multiplier
def world_coord_view_augmentation(view, pts):
order = view_to_order[view]
pts = pts.reshape([-1,3])
x,y,z = np.moveaxis(pts, 1, 0)
return np.array([get_axis_pt(o,x,y,z) for o in order]).T
########################################################################
# partial observation projection / transform / rendering utilities
########################################################################
def transform_points_cam_to_world(cam_pts, camera_pose):
world_pts = np.transpose(
np.dot(camera_pose[0:3, 0:3], np.transpose(cam_pts)) + np.tile(camera_pose[0:3, 3:], (1, cam_pts.shape[0])))
return world_pts
def transform_points_world_to_cam(world_points, cam_extr):
return np.transpose(
np.dot(
np.linalg.inv(
cam_extr[0:3, 0:3]),
np.transpose(world_points)
- np.tile(cam_extr[0:3, 3:], (1, world_points.shape[0]))))
def render_points_slowest(world_points, cam_extr, cam_intr):
cam_points = transform_points_world_to_cam(world_points, cam_extr)
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
cam_pts_x = np.rint(cam_pts_x).astype(int)
cam_pts_y = np.rint(cam_pts_y).astype(int)
points = np.stack([cam_pts_y, cam_pts_x, cam_pts_z, np.arange(len(cam_pts_x))]).T
sorted_pts = sorted(points, key=lambda x: (x[0], x[1]))
grouped_pts = [[*j] for i, j in itertools.groupby(
sorted_pts,
key=lambda x: (x[0] // 3, x[1] // 3))]
min_depth = np.array([sorted(p, key=lambda x: -x[2])[0] for p in grouped_pts])
min_idx = min_depth[:,-1]
min_depth = min_depth[:,:-1]
return world_points[min_idx.astype(int)]
def render_points_slow(world_points, cam_extr, cam_intr):
cam_points = transform_points_world_to_cam(world_points, cam_extr)
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
points = np.stack([cam_pts_y, cam_pts_x, cam_pts_z, np.arange(len(cam_pts_x))]).T
points[:,:2] = np.rint(points[:,:2] / 2)
points = points[points[:,1].argsort()]
points = points[points[:,0].argsort(kind='mergesort')]
grouped_pts = np.split(points[:,2:], np.unique(points[:, :2], axis=0, return_index=True)[1][1:])
min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
min_idx = min_depth[:,-1].astype(int)
return world_points[min_idx]
def render_points(world_points, cam_extr, cam_intr):
cam_points = transform_points_world_to_cam(world_points, cam_extr)
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
idx = np.rint(cam_pts_y / 2) * 1000 + np.rint(cam_pts_x / 2)
val = np.stack([cam_pts_z, np.arange(len(cam_pts_x))]).T
order = idx.argsort()
idx = idx[order]
val = val[order]
grouped_pts = np.split(val, np.unique(idx, return_index=True)[1][1:])
min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
min_idx = min_depth[:,-1].astype(int)
return world_points[min_idx]
def project_depth_world_space(depth_image, camera_intr, camera_pose, keep_dim=False, project_factor=1.):
cam_pts = project_depth_cam_space(depth_image, camera_intr, keep_dim=False,project_factor=project_factor)
world_pts = transform_points_cam_to_world(cam_pts, camera_pose)
    im_h, im_w = depth_image.shape
    if keep_dim:
        world_pts = world_pts.reshape([im_h, im_w, 3])
return world_pts
def project_depth_cam_space(depth_img, camera_intrinsics, keep_dim=True, project_factor=1.):
# Get depth image size
im_h = depth_img.shape[0]
im_w = depth_img.shape[1]
# Project depth into 3D point cloud in camera coordinates
pix_x, pix_y = np.meshgrid(np.linspace(0, im_w - 1, im_w), np.linspace(0, im_h - 1, im_h))
cam_pts_x = np.multiply(pix_x - im_w / 2., -depth_img / camera_intrinsics[0, 0])
cam_pts_y = np.multiply(pix_y - im_h / 2., depth_img / camera_intrinsics[1, 1])
cam_pts_z = depth_img.copy()
cam_pts_x.shape = (im_h * im_w, 1)
cam_pts_y.shape = (im_h * im_w, 1)
cam_pts_z.shape = (im_h * im_w, 1)
cam_pts = np.concatenate((cam_pts_x, cam_pts_y, cam_pts_z), axis=1) * project_factor
if keep_dim:
cam_pts = cam_pts.reshape([im_h, im_w, 3])
return cam_pts
def get_trunc_ab(mean, std, a, b):
    return (a - mean) / std, (b - mean) / std
########################################################################
# partial observation getter for full experiment
########################################################################
CAM_EXTR = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 0.6427898318479135, -0.766043895201295, -565.0],
[0.0, 0.766047091387779, 0.6427871499290135, 550.0], [0.0, 0.0, 0.0, 1.0]])
CAM_INTR = np.array([[687.1868314210544, 0.0, 360.0], [0.0, 687.1868314210544, 360.0], [0.0, 0.0, 1.0]])
SCENE_RANGE = np.array([[-600, -400, 0], [600, 400, 400]])
def get_scene_partial_pointcloud(model_category, model_name, split_id, int_id, frame_id, data_root):
path = f"{data_root}/{split_id}/{model_category}/{model_name}/img/{{}}_{int_id:04d}_{frame_id:06d}.{{}}"
depth_img = path.format('depth', 'png')
depth_img = np.array(Image.open(depth_img).convert(mode='I'))
depth_vals = -np.array(depth_img).astype(float) / 1000.
rgb_img = path.format('rgb', 'jpg')
rgb_img = np.array(Image.open(rgb_img).convert(mode="RGB")).astype(float) / 255
seg_img = path.format('seg', 'jpg')
seg_img = np.array(Image.open(seg_img).convert('L')).squeeze()
non_env = np.where(seg_img != 0)
env = np.where(seg_img == 0)
partial_points = project_depth_world_space(depth_vals, CAM_INTR, CAM_EXTR, keep_dim=True, project_factor=100.)
partial_points_rgb = np.concatenate([partial_points, rgb_img], axis=-1)
obj_pts = partial_points_rgb[non_env]
env_pts = partial_points_rgb[env]
return obj_pts, env_pts
########################################################################
# Get geometric state (full experiment)
########################################################################
def get_object_full_points(model_category, model_name, split_id, int_id, frame_id, data_root):
path = f"{data_root}/{split_id}/{model_category}/{model_name}/geom/{int_id:04d}_{frame_id:06d}.npz"
geom_data = np.load(path)
loc = geom_data['loc']
    w, x, y, z = geom_data['rot']
rot = Rotation.from_quat(np.array([x,y,z,w]))
scale = geom_data['scale']
sim_pts = (rot.apply(geom_data['sim'] * scale)) + loc
vis_pts = (rot.apply(geom_data['vis'] * scale)) + loc
return sim_pts, vis_pts
########################################################################
# partial observation getter for teddy toy example
########################################################################
def get_teddy_partial_pointcloud(int_group, int_id, frame_id, data_root, cam_id='cam0'):
#depth_img = glob.glob(f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_*{frame_id:03d}_depth.png")[0]
depth_img = f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_{frame_id:03d}_depth.png"
depth_img = np.array(Image.open(depth_img).convert(mode='I'))
depth_vals = -np.array(depth_img).astype(float) / 1000.
#rgb_img = glob.glob(f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_*{frame_id:03d}_rgb.png")[0]
rgb_img = f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_{frame_id:03d}_rgb.png"
rgb_img = np.array(Image.open(rgb_img).convert(mode="RGB")).astype(float) / 255
#seg_img = glob.glob(f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_*{frame_id:03d}_seg.png")[0]
seg_img = f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_{frame_id:03d}_seg.png"
seg_img = np.array(Image.open(seg_img))
non_env = np.where(seg_img != 0)
ospdir= os.path.dirname
root_dir = ospdir(ospdir(ospdir(os.path.realpath(__file__))))
camera_json = os.path.join(root_dir, "metadata", "camera.json")
with open(camera_json, 'r') as fp:
cam_info = json.load(fp)
for k in cam_info.keys():
cam_extr, cam_intr = cam_info[k]
cam_info[k] = np.array(cam_extr), np.array(cam_intr)
cam_extr, cam_intr = cam_info[cam_id]
partial_points = project_depth_world_space(depth_vals, cam_intr, cam_extr, keep_dim=True)
partial_points_rgb = np.concatenate([partial_points, rgb_img], axis=-1)
xyzrgb = partial_points_rgb[non_env]
xyz = xyzrgb[:,:3]
xyz = world_coord_view_augmentation(cam_id, xyz)
rgb = xyzrgb[:,3:]
return xyz/ 10. * 1.1, rgb
########################################################################
# Get meta info (teddy toy example)
########################################################################
def get_teddy_loc(int_group, int_id, frame_id, data_root):
obj_info = f"{data_root}/{int_group}/info/{int_id:06d}.json"
with open(obj_info, 'r') as fp:
int_info = json.load(fp)
return np.array(dict(zip(int_info['frames'], int_info['teddy_loc']))[frame_id])
def get_teddy_rot(int_group, int_id, frame_id, data_root):
obj_info = f"{data_root}/{int_group}/info/{int_id:06d}.json"
with open(obj_info, 'r') as fp:
int_info = json.load(fp)
w,x,y,z = np.array(dict(zip(int_info['frames'], int_info['teddy_rot']))[frame_id])
return np.array([x,y,z,w])
def get_action_info(int_group, int_id, data_root):
obj_info = f"{data_root}/{int_group}/info/{int_id:06d}.json"
with open(obj_info, 'r') as fp:
int_info = json.load(fp)
grasp_loc = np.array(int_info['grasp'])
target_loc = np.array(int_info['target'])
return grasp_loc, target_loc
def get_release_frame(int_group, int_id, data_root):
obj_info = f"{data_root}/{int_group}/info/{int_id:06d}.json"
with open(obj_info, 'r') as fp:
return json.load(fp)['release_frame']
# name = glob.glob(
# f"{data_root}/{int_group}/geom/{int_id:06d}_release_*_sim.npy")[0].split("/")[-1]
# return int(name.split("_")[-2])
def get_end_frame(int_group, int_id, data_root):
obj_info = f"{data_root}/{int_group}/info/{int_id:06d}.json"
with open(obj_info, 'r') as fp:
return json.load(fp)['end_frame']
# name = glob.glob(
# f"{data_root}/{int_group}/geom/{int_id:06d}_static_*_sim.npy")[0].split("/")[-1]
# return int(name.split("_")[-2])
########################################################################
# Get geometric state (teddy toy example)
########################################################################
def get_teddy_full_points(int_group, int_id, frame_id, data_root):
#sim_data = glob.glob(f"{data_root}/{int_group}/geom/{int_id:06d}_*{frame_id:03d}_sim.npy")[0]
sim_data = f"{data_root}/{int_group}/geom/{int_id:06d}_{frame_id:03d}_sim.npy"
points = np.load(sim_data)
teddy_loc = get_teddy_loc(int_group, int_id, frame_id, data_root)
teddy_rot = Rotation.from_quat(get_teddy_rot(int_group, int_id, frame_id, data_root))
return ( teddy_rot.apply(points) + teddy_loc ) / 10. * 1.1
#return ( points + teddy_loc ) / 10. * 1.1
def get_teddy_vis_points(int_group, int_id, frame_id, data_root):
#sim_data = glob.glob(f"{data_root}/{int_group}/geom/{int_id:06d}_*{frame_id:03d}_vis.npy")[0]
sim_data = f"{data_root}/{int_group}/geom/{int_id:06d}_{frame_id:03d}_vis.npy"
points = np.load(sim_data)
teddy_loc = get_teddy_loc(int_group, int_id, frame_id, data_root)
teddy_rot = Rotation.from_quat(get_teddy_rot(int_group, int_id, frame_id, data_root))
return ( teddy_rot.apply(points) + teddy_loc ) / 10. * 1.1
#return ( points + teddy_loc ) / 10. * 1.1
########################################################################
# Get point-based supervision data for implicit functions (teddy toy example)
########################################################################
def sample_occupancies(int_group, int_id, frame_id, data_root, sample_scheme='uniform'):
if sample_scheme not in ['uniform', 'gaussian']:
raise ValueError('Unsupported sampling scheme for occupancy')
num_pts = 100000
if sample_scheme == 'uniform':
pts = np.random.rand(num_pts, 3)
pts = 1.1 * (pts - 0.5)
else:
x,y,z= get_teddy_loc(int_group, int_id, frame_id, data_root) / 10. * 1.1
std = 0.18
a, b = -0.55, 0.55
xs = scipy.stats.truncnorm.rvs(*get_trunc_ab(x, std, a, b), loc=x, scale=std, size=num_pts)
ys = scipy.stats.truncnorm.rvs(*get_trunc_ab(y, std, a, b), loc=y, scale=std, size=num_pts)
zs = scipy.stats.truncnorm.rvs(*get_trunc_ab(z, std, a, b), loc=z, scale=std, size=num_pts)
pts = np.array([xs,ys,zs]).T
teddy_sim_points = get_teddy_full_points(int_group, int_id, frame_id, data_root)
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric='l2').fit(teddy_sim_points)
    dist, ind = x_nn.kneighbors(pts)
dist = dist.squeeze()
ind = ind.squeeze()
occ = dist < 0.01
pt_class = ind[occ != 0]
return pts, occ, pt_class
def sample_occupancies_with_flow(int_group, int_id, release_frame, end_frame, data_root, sample_scheme='uniform'):
pts, occ, ind = sample_occupancies(int_group, int_id, 0, data_root, sample_scheme)
xyz0 = get_teddy_full_points(int_group, int_id, 0, data_root)
f1 = get_teddy_full_points(int_group, int_id, release_frame, data_root) - xyz0
f2 = get_teddy_full_points(int_group, int_id, end_frame, data_root) - xyz0
return pts, occ, ind, f1[ind],f2[ind]
########################################################################
# Visualization
########################################################################
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
def set_axes_equal(ax):
    '''Make axes of a 3D plot have equal scale so that spheres appear as spheres,
    cubes as cubes, etc. This is one possible solution to Matplotlib's
    ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
'''
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5*max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
def side_by_side_point_clouds(point_clouds, angle=(90,0)):
fig = plt.figure()
W = int(len(point_clouds) ** 0.5)
H = math.ceil(len(point_clouds) / W)
for i, pcloud in enumerate(point_clouds):
action = None
flow = None
pts = pcloud['pts']
title = pcloud['title']
col = pcloud.get('col', None)
flow = pcloud.get('flow', None)
action = pcloud.get('action', None)
ax = fig.add_subplot(W, H, i+1,projection='3d')
ax.set_title(title)
if flow is not None:
flow_norm = np.linalg.norm(flow, axis=1)
viz_idx = flow_norm > 0.0
flow = flow[viz_idx]
ax.quiver(
pts[:,0][viz_idx],
pts[:,2][viz_idx],
pts[:,1][viz_idx],
flow[:,0], flow[:,2], flow[:,1],
color = 'red', linewidth=3, alpha=0.2
)
if col is None:
col = 'blue'
ax.scatter(pts[:,0],
pts[:,2],
pts[:,1], color=col,s=0.5)
set_axes_equal(ax)
ax.view_init(*angle)
if action is not None:
ax.scatter(action[0], action[1], 0.,
edgecolors='tomato', color='turquoise', marker='*',s=80)
return fig
def write_pointcoud_as_obj(xyzrgb, path):
if xyzrgb.shape[1] == 6:
with open(path, 'w') as fp:
for x,y,z,r,g,b in xyzrgb:
fp.write(f"v {x:.3f} {y:.3f} {z:.3f} {r:.3f} {g:.3f} {b:.3f}\n")
else:
with open(path, 'w') as fp:
for x,y,z in xyzrgb:
fp.write(f"v {x:.3f} {y:.3f} {z:.3f}\n")
#################################
# Distance Metric
#################################
def subsample_points(points, resolution=0.0125, return_index=True):
    idx = np.unique(points // resolution * resolution, axis=0, return_index=True)[1]
if return_index:
return idx
return points[idx]
from sklearn.neighbors import NearestNeighbors
def chamfer_distance(x, y, metric='l2', direction='bi'):
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
min_y_to_x = x_nn.kneighbors(y)[0]
y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
min_x_to_y = y_nn.kneighbors(x)[0]
return np.mean(min_y_to_x) + np.mean(min_x_to_y)
def f1_score(x, y, metric='l2', th=0.01):
# x is pred
# y is gt
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
d2 = x_nn.kneighbors(y)[0]
y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
d1 = y_nn.kneighbors(x)[0]
recall = float(sum(d < th for d in d2)) / float(len(d2))
precision = float(sum(d < th for d in d1)) / float(len(d1))
if recall+precision > 0:
fscore = 2 * recall * precision / (recall + precision)
else:
fscore = 0
return fscore, precision, recall |
NVlabs/ACID/ACID/src/utils/plushsim_util.py | import os
import glob
import json
import scipy
import itertools
import numpy as np
from PIL import Image
from scipy.spatial.transform import Rotation
from sklearn.neighbors import NearestNeighbors
from .common_util import *
########################################################################
# Some file getters
########################################################################
def get_model_dir(data_root, split_id, model_category, model_name):
return f"{data_root}/{split_id}/{model_category}/{model_name}"
def get_interaction_info_file(data_root, split_id, model_category, model_name, reset_id):
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/info/interaction_info_{reset_id:04d}.npz"
def get_geom_file(data_root, split_id, model_category, model_name, reset_id, frame_id):
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/geom/{reset_id:04d}_{frame_id:06d}.npz"
def get_image_file_template(data_root, split_id, model_category, model_name, reset_id, frame_id):
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/img/{{}}_{reset_id:04d}_{frame_id:06d}.{{}}"
def get_rgb(data_root, split_id, model_category, model_name, reset_id, frame_id):
temp = get_image_file_template(data_root, split_id, model_category, model_name, reset_id, frame_id)
return temp.format('rgb', 'jpg')
def get_depth(data_root, split_id, model_category, model_name, reset_id, frame_id):
temp = get_image_file_template(data_root, split_id, model_category, model_name, reset_id, frame_id)
return temp.format('depth', 'png')
def get_seg(data_root, split_id, model_category, model_name, reset_id, frame_id):
temp = get_image_file_template(data_root, split_id, model_category, model_name, reset_id, frame_id)
return temp.format('seg', 'jpg')
def get_flow_data_file(flow_root,split_id, model_id, reset_id, int_id):
return f"{flow_root}/{split_id}/{model_id}/{reset_id:03d}_{int_id:03d}.npz"
def get_flow_pair_data_file(pair_root,split_id, model_id, reset_id, int_id):
return f"{pair_root}/{split_id}/{model_id}/pair_{reset_id:03d}_{int_id:03d}.npz"
def get_geom_data_file(geom_root,split_id, model_id, reset_id, frame_id):
return f"{geom_root}/{split_id}/{model_id}/{reset_id:03d}_{frame_id:06d}.npz"
def get_pair_data_file(pair_root,split_id, model_id, reset_id, frame_id):
return f"{pair_root}/{split_id}/{model_id}/pair_{reset_id:03d}_{frame_id:06d}.npz"
# Getters for plan data
def get_plan_geom_file(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
if sequence_id == 'gt':
seq_str = sequence_id
else:
seq_str = f"{sequence_id:04d}"
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/geom/{scenario_id:04d}_{seq_str}_{frame_id}.npz"
def get_plan_interaction_info_file(data_root, split_id, model_category, model_name, scenario_id, sequence_id):
if sequence_id == 'gt':
seq_str = sequence_id
else:
seq_str = f"{sequence_id:04d}"
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/info/interaction_info_{scenario_id:04d}_{seq_str}.npz"
def get_plan_image_file_template(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
if sequence_id == 'gt':
seq_str = sequence_id
else:
seq_str = f"{sequence_id:04d}"
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/img/{{}}_{scenario_id:04d}_{seq_str}_{frame_id}.{{}}"
def get_plan_rgb(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
temp = get_plan_image_file_template(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
return temp.format('rgb', 'jpg')
def get_plan_depth(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
temp = get_plan_image_file_template(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
return temp.format('depth', 'png')
def get_plan_seg(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
temp = get_plan_image_file_template(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
return temp.format('seg', 'jpg')
def get_plan_perf_file(data_root, split_id, model_category, model_name, scenario_id):
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/info/perf_{scenario_id:04d}.npz"
########################################################################
# partial observation getter for full experiment
########################################################################
CAM_EXTR = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 0.6427898318479135, -0.766043895201295, -565.0],
[0.0, 0.766047091387779, 0.6427871499290135, 550.0], [0.0, 0.0, 0.0, 1.0]])
CAM_INTR = np.array([[687.1868314210544, 0.0, 360.0], [0.0, 687.1868314210544, 360.0], [0.0, 0.0, 1.0]])
SCENE_RANGE = np.array([[-600, -600, -20], [600, 600, 380]])
def get_plan_scene_partial_pointcloud(
model_category, model_name, split_id, scenario_id, sequence_id, frame_id, data_root):
depth_img = get_plan_depth(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
depth_img = np.array(Image.open(depth_img).convert(mode='I'))
depth_vals = -np.array(depth_img).astype(float) / 1000.
rgb_img = get_plan_rgb(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
rgb_img = np.array(Image.open(rgb_img).convert(mode="RGB")).astype(float) / 255
seg_img = get_plan_seg(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
seg_img = np.array(Image.open(seg_img).convert('L')).squeeze()
non_env = np.where(seg_img != 0)
env = np.where(seg_img == 0)
partial_points = project_depth_world_space(depth_vals, CAM_INTR, CAM_EXTR, keep_dim=True, project_factor=100.)
partial_points_rgb = np.concatenate([partial_points, rgb_img], axis=-1)
obj_pts = partial_points_rgb[non_env]
env_pts = partial_points_rgb[env]
return obj_pts, env_pts
def get_scene_partial_pointcloud(model_category, model_name, split_id, reset_id, frame_id, data_root):
depth_img = get_depth(data_root, split_id, model_category, model_name, reset_id, frame_id)
depth_img = np.array(Image.open(depth_img).convert(mode='I'))
depth_vals = -np.array(depth_img).astype(float) / 1000.
rgb_img = get_rgb(data_root, split_id, model_category, model_name, reset_id, frame_id)
rgb_img = np.array(Image.open(rgb_img).convert(mode="RGB")).astype(float) / 255
seg_img = get_seg(data_root, split_id, model_category, model_name, reset_id, frame_id)
seg_img = np.array(Image.open(seg_img).convert('L')).squeeze()
non_env = np.where(seg_img != 0)
env = np.where(seg_img == 0)
partial_points = project_depth_world_space(depth_vals, CAM_INTR, CAM_EXTR, keep_dim=True, project_factor=100.)
partial_points_rgb = np.concatenate([partial_points, rgb_img], axis=-1)
obj_pts = partial_points_rgb[non_env]
env_pts = partial_points_rgb[env]
return obj_pts, env_pts
def render_points(world_points, cam_extr=None, cam_intr=None, return_index=False, filter_in_cam=True):
if cam_extr is None:
cam_extr = CAM_EXTR
if cam_intr is None:
cam_intr = CAM_INTR
cam_points = transform_points_world_to_cam(world_points, cam_extr) / 100.
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
idx = np.rint(cam_pts_y / 6) * 1000 + np.rint(cam_pts_x / 6)
val = np.stack([cam_pts_z, np.arange(len(cam_pts_x))]).T
order = idx.argsort()
idx = idx[order]
val = val[order]
grouped_pts = np.split(val, np.unique(idx, return_index=True)[1][1:])
min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
min_idx = min_depth[:,-1].astype(int)
if filter_in_cam:
in_cam = np.where(np.logical_and(cam_pts_x > 0, cam_pts_y > 0))[0]
min_idx = np.intersect1d(in_cam, min_idx, assume_unique=True)
if return_index:
return min_idx
return world_points[min_idx]
########################################################################
# Get geometric state (full experiment)
########################################################################
def extract_full_points(path):
geom_data = np.load(path)
loc = geom_data['loc']
w,x,y,z= geom_data['rot']
rot = Rotation.from_quat(np.array([x,y,z,w]))
scale = geom_data['scale']
sim_pts = (rot.apply(geom_data['sim'] * scale)) + loc
vis_pts = (rot.apply(geom_data['vis'] * scale)) + loc
return sim_pts, vis_pts, loc, rot, scale
def get_object_full_points(model_category, model_name, split_id, reset_id, frame_id, data_root):
path = get_geom_file(data_root, split_id, model_category, model_name, reset_id, frame_id)
return extract_full_points(path)
def get_action_info(model_category, model_name, split_id, reset_id, interaction_id, data_root):
obj_info = get_interaction_info_file(data_root, split_id, model_category, model_name, reset_id)
int_info = np.load(obj_info)
grasp_loc = np.array(int_info['grasp_points'][interaction_id])
target_loc = np.array(int_info['target_points'][interaction_id])
start_frame = int_info['start_frames'][interaction_id]
release_frame = int_info['release_frames'][interaction_id]
static_frame = int_info['static_frames'][interaction_id]
return grasp_loc, target_loc, start_frame, release_frame, static_frame
########################################################################
# Get point-based supervision data for implicit functions (teddy toy example)
########################################################################
def sample_occupancies(full_pts, center,
sample_scheme='gaussian',
num_pts = 100000, bound=0.55,
std=0.1):
if sample_scheme not in ['uniform', 'gaussian', 'object']:
raise ValueError('Unsupported sampling scheme for occupancy')
if sample_scheme == 'uniform':
pts = np.random.rand(num_pts, 3)
pts = 1.1 * (pts - 0.5)
elif sample_scheme == 'object':
displace = full_pts[np.random.randint(full_pts.shape[0], size=num_pts)]
x_min,y_min,z_min = full_pts.min(axis=0)
x_max,y_max,z_max = full_pts.max(axis=0)
a, b = -bound, bound
xs = scipy.stats.truncnorm.rvs(*get_trunc_ab_range(x_min, x_max, std, a, b), loc=0, scale=std, size=num_pts)
ys = scipy.stats.truncnorm.rvs(*get_trunc_ab_range(y_min, y_max, std, a, b), loc=0, scale=std, size=num_pts)
zs = scipy.stats.truncnorm.rvs(*get_trunc_ab_range(z_min, z_max, std, a, b), loc=0, scale=std, size=num_pts)
pts = np.array([xs,ys,zs]).T + displace
else:
x,y,z= center
a, b = -bound, bound
xs = scipy.stats.truncnorm.rvs(*get_trunc_ab(x, std, a, b), loc=x, scale=std, size=num_pts)
ys = scipy.stats.truncnorm.rvs(*get_trunc_ab(y, std, a, b), loc=y, scale=std, size=num_pts)
zs = scipy.stats.truncnorm.rvs(*get_trunc_ab(z, std, a, b), loc=z, scale=std, size=num_pts)
pts = np.array([xs,ys,zs]).T
    x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric='l2').fit(full_pts)
    dist, ind = x_nn.kneighbors(pts)
    dist = dist.squeeze()
    ind = ind.squeeze()
    # a sample is occupied if it lies within 0.01 of the full point cloud;
    # pt_class maps each occupied sample to its nearest surface point index
    occ = dist < 0.01
    pt_class = ind[occ != 0]
    return pts, occ, pt_class
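# A minimal usage sketch for the sampler above (hypothetical shapes): given an
# (N, 3) surface cloud `full_pts`, something like
#   pts, occ, pt_class = sample_occupancies(full_pts, full_pts.mean(axis=0),
#                                           sample_scheme='gaussian')
# yields query points `pts`, a boolean mask `occ` marking samples within 0.01
# of the surface, and `pt_class` mapping each occupied sample to the index of
# its nearest surface point.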
########################################################################
# Visualization
########################################################################
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
def set_axes_equal(ax):
'''Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
'''
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5*max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
def side_by_side_point_clouds(point_clouds, angle=(90,0)):
fig = plt.figure()
W = int(len(point_clouds) ** 0.5)
H = math.ceil(len(point_clouds) / W)
    for i, pcloud in enumerate(point_clouds):
        pts = pcloud['pts']
        title = pcloud['title']
        col = pcloud.get('col', None)
        flow = pcloud.get('flow', None)
        action = pcloud.get('action', None)
ax = fig.add_subplot(W, H, i+1,projection='3d')
ax.set_title(title)
if flow is not None:
flow_norm = np.linalg.norm(flow, axis=1)
viz_idx = flow_norm > 0.0
flow = flow[viz_idx]
ax.quiver(
pts[:,0][viz_idx],
pts[:,2][viz_idx],
pts[:,1][viz_idx],
flow[:,0], flow[:,2], flow[:,1],
color = 'red', linewidth=3, alpha=0.2
)
if col is None:
col = 'blue'
ax.scatter(pts[:,0],
pts[:,2],
pts[:,1], color=col,s=0.5)
set_axes_equal(ax)
ax.view_init(*angle)
if action is not None:
ax.scatter(action[0], action[1], 0.,
edgecolors='tomato', color='turquoise', marker='*',s=80)
return fig
def write_pointcoud_as_obj(xyzrgb, path):
if xyzrgb.shape[1] == 6:
with open(path, 'w') as fp:
for x,y,z,r,g,b in xyzrgb:
fp.write(f"v {x:.3f} {y:.3f} {z:.3f} {r:.3f} {g:.3f} {b:.3f}\n")
else:
with open(path, 'w') as fp:
for x,y,z in xyzrgb:
fp.write(f"v {x:.3f} {y:.3f} {z:.3f}\n")
#################################
# Distance Metric
#################################
def subsample_points(points, resolution=0.0125, return_index=True):
    idx = np.unique(points // resolution * resolution, axis=0, return_index=True)[1]
if return_index:
return idx
return points[idx]
def miou(x, y, th=0.01):
x = subsample_points(x, resolution=th, return_index=False) // th
y = subsample_points(y, resolution=th, return_index=False) // th
xset = set([tuple(i) for i in x])
yset = set([tuple(i) for i in y])
return len(xset & yset) / len(xset | yset)
from sklearn.neighbors import NearestNeighbors
def chamfer_distance(x, y, metric='l2', direction='bi'):
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
min_y_to_x = x_nn.kneighbors(y)[0]
y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
min_x_to_y = y_nn.kneighbors(x)[0]
return np.mean(min_y_to_x) + np.mean(min_x_to_y)
def f1_score(x, y, metric='l2', th=0.01):
# x is pred
# y is gt
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
d2 = x_nn.kneighbors(y)[0]
y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
d1 = y_nn.kneighbors(x)[0]
    recall = float((d2 < th).sum()) / float(len(d2))
    precision = float((d1 < th).sum()) / float(len(d1))
if recall+precision > 0:
fscore = 2 * recall * precision / (recall + precision)
else:
fscore = 0
return fscore, precision, recall
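# Minimal usage sketch for the metrics above (hypothetical data): two nearby
# samplings of the same surface should give a small chamfer distance and an
# miou / f-score close to 1 at a matching threshold, e.g.
#   x = np.random.rand(1000, 3)
#   y = x + np.random.normal(scale=0.002, size=x.shape)
#   print(miou(x, y, th=0.01), chamfer_distance(x, y), f1_score(x, y, th=0.01))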
from scipy.spatial import cKDTree
def find_nn_cpu(feat0, feat1, return_distance=False):
feat1tree = cKDTree(feat1)
dists, nn_inds = feat1tree.query(feat0, k=1, n_jobs=-1)
if return_distance:
return nn_inds, dists
else:
return nn_inds
def find_emd_cpu(feat0, feat1, return_distance=False):
from scipy.spatial.distance import cdist
from scipy.optimize import linear_sum_assignment
d = cdist(feat0, feat1)
feat0_inds, feat1_inds = linear_sum_assignment(d)
return feat0_inds, feat1_inds
def find_nn_cpu_symmetry_consistent(feat0, feat1, pts0, pts1, n_neighbor=10, local_radis=0.05, return_distance=False):
feat1tree = cKDTree(feat1)
dists, nn_inds = feat1tree.query(feat0, k=n_neighbor, n_jobs=-1)
if return_distance:
return nn_inds, dists
else:
return nn_inds
#################################
# Ranking utilities
#################################
def overlap(list1, list2, depth):
"""Overlap which accounts for possible ties.
This isn't mentioned in the paper but should be used in the ``rbo*()``
functions below, otherwise overlap at a given depth might be > depth which
inflates the result.
There are no guidelines in the paper as to what's a good way to calculate
this, but a good guess is agreement scaled by the minimum between the
requested depth and the lengths of the considered lists (overlap shouldn't
be larger than the number of ranks in the shorter list, otherwise results
are conspicuously wrong when the lists are of unequal lengths -- rbo_ext is
not between rbo_min and rbo_min + rbo_res).
>>> overlap("abcd", "abcd", 3)
3.0
>>> overlap("abcd", "abcd", 5)
4.0
>>> overlap(["a", {"b", "c"}, "d"], ["a", {"b", "c"}, "d"], 2)
2.0
>>> overlap(["a", {"b", "c"}, "d"], ["a", {"b", "c"}, "d"], 3)
3.0
"""
return agreement(list1, list2, depth) * min(depth, len(list1), len(list2))
def rbo_ext(list1, list2, p=0.9):
"""RBO point estimate based on extrapolating observed overlap.
See equation (32) in paper.
NOTE: The doctests weren't verified against manual computations but seem
plausible.
>>> _round(rbo_ext("abcdefg", "abcdefg", .9))
1.0
>>> _round(rbo_ext("abcdefg", "bacdefg", .9))
0.9
"""
S, L = sorted((list1, list2), key=len)
s, l = len(S), len(L)
x_l = overlap(list1, list2, l)
x_s = overlap(list1, list2, s)
# the paper says overlap(..., d) / d, but it should be replaced by
# agreement(..., d) defined as per equation (28) so that ties are handled
# properly (otherwise values > 1 will be returned)
# sum1 = sum(p**d * overlap(list1, list2, d)[0] / d for d in range(1, l + 1))
sum1 = sum(p ** d * agreement(list1, list2, d) for d in range(1, l + 1))
sum2 = sum(p ** d * x_s * (d - s) / s / d for d in range(s + 1, l + 1))
term1 = (1 - p) / p * (sum1 + sum2)
term2 = p ** l * ((x_l - x_s) / l + x_s / s)
return term1 + term2
def set_at_depth(lst, depth):
ans = set()
for v in lst[:depth]:
if isinstance(v, set):
ans.update(v)
else:
ans.add(v)
return ans
def raw_overlap(list1, list2, depth):
"""Overlap as defined in the article.
"""
set1, set2 = set_at_depth(list1, depth), set_at_depth(list2, depth)
return len(set1.intersection(set2)), len(set1), len(set2)
def agreement(list1, list2, depth):
"""Proportion of shared values between two sorted lists at given depth.
>>> _round(agreement("abcde", "abdcf", 1))
1.0
>>> _round(agreement("abcde", "abdcf", 3))
0.667
>>> _round(agreement("abcde", "abdcf", 4))
1.0
>>> _round(agreement("abcde", "abdcf", 5))
0.8
>>> _round(agreement([{1, 2}, 3], [1, {2, 3}], 1))
0.667
>>> _round(agreement([{1, 2}, 3], [1, {2, 3}], 2))
1.0
"""
len_intersection, len_set1, len_set2 = raw_overlap(list1, list2, depth)
return 2 * len_intersection / (len_set1 + len_set2)
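# Minimal usage sketch for the ranking utilities above: identical rankings give
# an rbo_ext of 1.0, and agreement at depth d is the proportion of shared items,
# e.g.
#   rbo_ext("abcdef", "abcdef", p=0.9)   # -> 1.0
#   agreement("abcde", "abdcf", 3)       # -> 0.667 (2 shared items of 3)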
|
NVlabs/ACID/ACID/src/utils/libmise/__init__.py | from .mise import MISE
__all__ = [
    'MISE'
]
|
NVlabs/ACID/ACID/src/utils/libmise/test.py | import numpy as np
from mise import MISE
import time
t0 = time.time()
extractor = MISE(1, 2, 0.)
p = extractor.query()
i = 0
while p.shape[0] != 0:
print(i)
print(p)
v = 2 * (p.sum(axis=-1) > 2).astype(np.float64) - 1
extractor.update(p, v)
p = extractor.query()
i += 1
    if i >= 8:
break
print(extractor.to_dense())
# p, v = extractor.get_points()
# print(p)
# print(v)
print('Total time: %f' % (time.time() - t0))
|
NVlabs/ACID/ACID/src/utils/libsimplify/__init__.py | from .simplify_mesh import (
mesh_simplify
)
import trimesh
def simplify_mesh(mesh, f_target=10000, agressiveness=7.):
    """Simplify a trimesh.Trimesh to roughly f_target faces using
    fast quadric edge-collapse simplification."""
    vertices = mesh.vertices
    faces = mesh.faces
    vertices, faces = mesh_simplify(vertices, faces, f_target, agressiveness)
    mesh_simplified = trimesh.Trimesh(vertices, faces, process=False)
    return mesh_simplified
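# Minimal usage sketch (hypothetical file names), assuming trimesh is installed:
#   mesh = trimesh.load('model.obj')
#   simplified = simplify_mesh(mesh, f_target=5000)
#   simplified.export('model_simplified.obj')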
|
NVlabs/ACID/ACID/src/utils/libsimplify/test.py | from simplify_mesh import mesh_simplify
import numpy as np
v = np.random.rand(100, 3)
f = np.random.choice(range(100), (50, 3))
mesh_simplify(v, f, 50) |
NVlabs/ACID/ACID/src/utils/libsimplify/Simplify.h | /////////////////////////////////////////////
//
// Mesh Simplification Tutorial
//
// (C) by Sven Forstmann in 2014
//
// License : MIT
// http://opensource.org/licenses/MIT
//
//https://github.com/sp4cerat/Fast-Quadric-Mesh-Simplification
//
// 5/2016: Chris Rorden created minimal version for OSX/Linux/Windows compile
//#include <iostream>
//#include <stddef.h>
//#include <functional>
//#include <sys/stat.h>
//#include <stdbool.h>
#include <string.h>
#include <ctype.h> // needed by trimwhitespace() for isspace()
//#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <map>
#include <vector>
#include <string>
#include <math.h>
#include <float.h> //FLT_EPSILON, DBL_EPSILON
#define loopi(start_l,end_l) for ( int i=start_l;i<end_l;++i )
#define loopj(start_l,end_l) for ( int j=start_l;j<end_l;++j )
#define loopk(start_l,end_l) for ( int k=start_l;k<end_l;++k )
struct vector3
{
double x, y, z;
};
struct vec3f
{
double x, y, z;
inline vec3f( void ) {}
//inline vec3f operator =( vector3 a )
// { vec3f b ; b.x = a.x; b.y = a.y; b.z = a.z; return b;}
inline vec3f( vector3 a )
{ x = a.x; y = a.y; z = a.z; }
inline vec3f( const double X, const double Y, const double Z )
{ x = X; y = Y; z = Z; }
inline vec3f operator + ( const vec3f& a ) const
{ return vec3f( x + a.x, y + a.y, z + a.z ); }
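	// note: declared const and returning a copy, this operator does not mutate *this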
inline vec3f operator += ( const vec3f& a ) const
{ return vec3f( x + a.x, y + a.y, z + a.z ); }
inline vec3f operator * ( const double a ) const
{ return vec3f( x * a, y * a, z * a ); }
inline vec3f operator * ( const vec3f a ) const
{ return vec3f( x * a.x, y * a.y, z * a.z ); }
inline vec3f v3 () const
{ return vec3f( x , y, z ); }
inline vec3f operator = ( const vector3 a )
{ x=a.x;y=a.y;z=a.z;return *this; }
inline vec3f operator = ( const vec3f a )
{ x=a.x;y=a.y;z=a.z;return *this; }
inline vec3f operator / ( const vec3f a ) const
{ return vec3f( x / a.x, y / a.y, z / a.z ); }
inline vec3f operator - ( const vec3f& a ) const
{ return vec3f( x - a.x, y - a.y, z - a.z ); }
inline vec3f operator / ( const double a ) const
{ return vec3f( x / a, y / a, z / a ); }
inline double dot( const vec3f& a ) const
{ return a.x*x + a.y*y + a.z*z; }
inline vec3f cross( const vec3f& a , const vec3f& b )
{
x = a.y * b.z - a.z * b.y;
y = a.z * b.x - a.x * b.z;
z = a.x * b.y - a.y * b.x;
return *this;
}
inline double angle( const vec3f& v )
{
vec3f a = v , b = *this;
double dot = v.x*x + v.y*y + v.z*z;
double len = a.length() * b.length();
if(len==0)len=0.00001f;
double input = dot / len;
if (input<-1) input=-1;
if (input>1) input=1;
return (double) acos ( input );
}
inline double angle2( const vec3f& v , const vec3f& w )
{
vec3f a = v , b= *this;
double dot = a.x*b.x + a.y*b.y + a.z*b.z;
double len = a.length() * b.length();
if(len==0)len=1;
vec3f plane; plane.cross( b,w );
if ( plane.x * a.x + plane.y * a.y + plane.z * a.z > 0 )
return (double) -acos ( dot / len );
return (double) acos ( dot / len );
}
inline vec3f rot_x( double a )
{
double yy = cos ( a ) * y + sin ( a ) * z;
double zz = cos ( a ) * z - sin ( a ) * y;
y = yy; z = zz;
return *this;
}
inline vec3f rot_y( double a )
{
double xx = cos ( -a ) * x + sin ( -a ) * z;
double zz = cos ( -a ) * z - sin ( -a ) * x;
x = xx; z = zz;
return *this;
}
inline void clamp( double min, double max )
{
if (x<min) x=min;
if (y<min) y=min;
if (z<min) z=min;
if (x>max) x=max;
if (y>max) y=max;
if (z>max) z=max;
}
inline vec3f rot_z( double a )
{
double yy = cos ( a ) * y + sin ( a ) * x;
double xx = cos ( a ) * x - sin ( a ) * y;
y = yy; x = xx;
return *this;
}
inline vec3f invert()
{
x=-x;y=-y;z=-z;return *this;
}
inline vec3f frac()
{
return vec3f(
x-double(int(x)),
y-double(int(y)),
z-double(int(z))
);
}
inline vec3f integer()
{
return vec3f(
double(int(x)),
double(int(y)),
double(int(z))
);
}
inline double length() const
{
return (double)sqrt(x*x + y*y + z*z);
}
inline vec3f normalize( double desired_length = 1 )
{
double square = sqrt(x*x + y*y + z*z);
/*
if (square <= 0.00001f )
{
x=1;y=0;z=0;
return *this;
}*/
//double len = desired_length / square;
x/=square;y/=square;z/=square;
return *this;
}
static vec3f normalize( vec3f a );
static void random_init();
static double random_double();
static vec3f random();
static int random_number;
double random_double_01(double a){
double rnf=a*14.434252+a*364.2343+a*4213.45352+a*2341.43255+a*254341.43535+a*223454341.3523534245+23453.423412;
int rni=((int)rnf)%100000;
return double(rni)/(100000.0f-1.0f);
}
vec3f random01_fxyz(){
x=(double)random_double_01(x);
y=(double)random_double_01(y);
z=(double)random_double_01(z);
return *this;
}
};
vec3f barycentric(const vec3f &p, const vec3f &a, const vec3f &b, const vec3f &c){
vec3f v0 = b-a;
vec3f v1 = c-a;
vec3f v2 = p-a;
double d00 = v0.dot(v0);
double d01 = v0.dot(v1);
double d11 = v1.dot(v1);
double d20 = v2.dot(v0);
double d21 = v2.dot(v1);
double denom = d00*d11-d01*d01;
double v = (d11 * d20 - d01 * d21) / denom;
double w = (d00 * d21 - d01 * d20) / denom;
double u = 1.0 - v - w;
return vec3f(u,v,w);
}
vec3f interpolate(const vec3f &p, const vec3f &a, const vec3f &b, const vec3f &c, const vec3f attrs[3])
{
vec3f bary = barycentric(p,a,b,c);
vec3f out = vec3f(0,0,0);
out = out + attrs[0] * bary.x;
out = out + attrs[1] * bary.y;
out = out + attrs[2] * bary.z;
return out;
}
double min(double v1, double v2) {
return fmin(v1,v2);
}
class SymetricMatrix {
public:
// Constructor
SymetricMatrix(double c=0) { loopi(0,10) m[i] = c; }
SymetricMatrix( double m11, double m12, double m13, double m14,
double m22, double m23, double m24,
double m33, double m34,
double m44) {
m[0] = m11; m[1] = m12; m[2] = m13; m[3] = m14;
m[4] = m22; m[5] = m23; m[6] = m24;
m[7] = m33; m[8] = m34;
m[9] = m44;
}
// Make plane
SymetricMatrix(double a,double b,double c,double d)
{
m[0] = a*a; m[1] = a*b; m[2] = a*c; m[3] = a*d;
m[4] = b*b; m[5] = b*c; m[6] = b*d;
m[7 ] =c*c; m[8 ] = c*d;
m[9 ] = d*d;
}
double operator[](int c) const { return m[c]; }
// Determinant
double det( int a11, int a12, int a13,
int a21, int a22, int a23,
int a31, int a32, int a33)
{
double det = m[a11]*m[a22]*m[a33] + m[a13]*m[a21]*m[a32] + m[a12]*m[a23]*m[a31]
- m[a13]*m[a22]*m[a31] - m[a11]*m[a23]*m[a32]- m[a12]*m[a21]*m[a33];
return det;
}
const SymetricMatrix operator+(const SymetricMatrix& n) const
{
return SymetricMatrix( m[0]+n[0], m[1]+n[1], m[2]+n[2], m[3]+n[3],
m[4]+n[4], m[5]+n[5], m[6]+n[6],
m[ 7]+n[ 7], m[ 8]+n[8 ],
m[ 9]+n[9 ]);
}
SymetricMatrix& operator+=(const SymetricMatrix& n)
{
m[0]+=n[0]; m[1]+=n[1]; m[2]+=n[2]; m[3]+=n[3];
m[4]+=n[4]; m[5]+=n[5]; m[6]+=n[6]; m[7]+=n[7];
m[8]+=n[8]; m[9]+=n[9];
return *this;
}
double m[10];
};
///////////////////////////////////////////
namespace Simplify
{
// Global Variables & Strctures
enum Attributes {
NONE,
NORMAL = 2,
TEXCOORD = 4,
COLOR = 8
};
struct Triangle { int v[3];double err[4];int deleted,dirty,attr;vec3f n;vec3f uvs[3];int material; };
struct Vertex { vec3f p;int tstart,tcount;SymetricMatrix q;int border;};
struct Ref { int tid,tvertex; };
std::vector<Triangle> triangles;
std::vector<Vertex> vertices;
std::vector<Ref> refs;
std::string mtllib;
std::vector<std::string> materials;
// Helper functions
double vertex_error(SymetricMatrix q, double x, double y, double z);
double calculate_error(int id_v1, int id_v2, vec3f &p_result);
bool flipped(vec3f p,int i0,int i1,Vertex &v0,Vertex &v1,std::vector<int> &deleted);
void update_uvs(int i0,const Vertex &v,const vec3f &p,std::vector<int> &deleted);
void update_triangles(int i0,Vertex &v,std::vector<int> &deleted,int &deleted_triangles);
void update_mesh(int iteration);
void compact_mesh();
//
// Main simplification function
//
// target_count : target nr. of triangles
// agressiveness : sharpness to increase the threshold.
// 5..8 are good numbers
// more iterations yield higher quality
//
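	// Usage sketch (hypothetical file names): load_obj("in.obj"), then
	// simplify_mesh(int(triangles.size()/2)), then write_obj("out.obj").
	// The collapse threshold grows each pass as 1e-9 * pow(iteration+3, agressiveness).
	//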
void simplify_mesh(int target_count, double agressiveness=7, bool verbose=false)
{
// init
loopi(0,triangles.size())
{
triangles[i].deleted=0;
}
// main iteration loop
int deleted_triangles=0;
std::vector<int> deleted0,deleted1;
int triangle_count=triangles.size();
//int iteration = 0;
//loop(iteration,0,100)
for (int iteration = 0; iteration < 100; iteration ++)
{
if(triangle_count-deleted_triangles<=target_count)break;
// update mesh once in a while
if(iteration%5==0)
{
update_mesh(iteration);
}
// clear dirty flag
loopi(0,triangles.size()) triangles[i].dirty=0;
//
// All triangles with edges below the threshold will be removed
//
// The following numbers works well for most models.
// If it does not, try to adjust the 3 parameters
//
double threshold = 0.000000001*pow(double(iteration+3),agressiveness);
// target number of triangles reached ? Then break
if ((verbose) && (iteration%5==0)) {
printf("iteration %d - triangles %d threshold %g\n",iteration,triangle_count-deleted_triangles, threshold);
}
// remove vertices & mark deleted triangles
loopi(0,triangles.size())
{
Triangle &t=triangles[i];
if(t.err[3]>threshold) continue;
if(t.deleted) continue;
if(t.dirty) continue;
loopj(0,3)if(t.err[j]<threshold)
{
int i0=t.v[ j ]; Vertex &v0 = vertices[i0];
int i1=t.v[(j+1)%3]; Vertex &v1 = vertices[i1];
// Border check
if(v0.border != v1.border) continue;
// Compute vertex to collapse to
vec3f p;
calculate_error(i0,i1,p);
deleted0.resize(v0.tcount); // normals temporarily
deleted1.resize(v1.tcount); // normals temporarily
// don't remove if flipped
if( flipped(p,i0,i1,v0,v1,deleted0) ) continue;
if( flipped(p,i1,i0,v1,v0,deleted1) ) continue;
if ( (t.attr & TEXCOORD) == TEXCOORD )
{
update_uvs(i0,v0,p,deleted0);
update_uvs(i0,v1,p,deleted1);
}
// not flipped, so remove edge
v0.p=p;
v0.q=v1.q+v0.q;
int tstart=refs.size();
update_triangles(i0,v0,deleted0,deleted_triangles);
update_triangles(i0,v1,deleted1,deleted_triangles);
int tcount=refs.size()-tstart;
if(tcount<=v0.tcount)
{
// save ram
if(tcount)memcpy(&refs[v0.tstart],&refs[tstart],tcount*sizeof(Ref));
}
else
// append
v0.tstart=tstart;
v0.tcount=tcount;
break;
}
// done?
if(triangle_count-deleted_triangles<=target_count)break;
}
}
// clean up mesh
compact_mesh();
} //simplify_mesh()
void simplify_mesh_lossless(bool verbose=false)
{
// init
loopi(0,triangles.size()) triangles[i].deleted=0;
// main iteration loop
int deleted_triangles=0;
std::vector<int> deleted0,deleted1;
int triangle_count=triangles.size();
//int iteration = 0;
//loop(iteration,0,100)
for (int iteration = 0; iteration < 9999; iteration ++)
{
// update mesh constantly
update_mesh(iteration);
// clear dirty flag
loopi(0,triangles.size()) triangles[i].dirty=0;
//
// All triangles with edges below the threshold will be removed
//
// The following numbers works well for most models.
// If it does not, try to adjust the 3 parameters
//
double threshold = DBL_EPSILON; //1.0E-3 EPS;
if (verbose) {
printf("lossless iteration %d\n", iteration);
}
// remove vertices & mark deleted triangles
loopi(0,triangles.size())
{
Triangle &t=triangles[i];
if(t.err[3]>threshold) continue;
if(t.deleted) continue;
if(t.dirty) continue;
loopj(0,3)if(t.err[j]<threshold)
{
int i0=t.v[ j ]; Vertex &v0 = vertices[i0];
int i1=t.v[(j+1)%3]; Vertex &v1 = vertices[i1];
// Border check
if(v0.border != v1.border) continue;
// Compute vertex to collapse to
vec3f p;
calculate_error(i0,i1,p);
deleted0.resize(v0.tcount); // normals temporarily
deleted1.resize(v1.tcount); // normals temporarily
// don't remove if flipped
if( flipped(p,i0,i1,v0,v1,deleted0) ) continue;
if( flipped(p,i1,i0,v1,v0,deleted1) ) continue;
if ( (t.attr & TEXCOORD) == TEXCOORD )
{
update_uvs(i0,v0,p,deleted0);
update_uvs(i0,v1,p,deleted1);
}
// not flipped, so remove edge
v0.p=p;
v0.q=v1.q+v0.q;
int tstart=refs.size();
update_triangles(i0,v0,deleted0,deleted_triangles);
update_triangles(i0,v1,deleted1,deleted_triangles);
int tcount=refs.size()-tstart;
if(tcount<=v0.tcount)
{
// save ram
if(tcount)memcpy(&refs[v0.tstart],&refs[tstart],tcount*sizeof(Ref));
}
else
// append
v0.tstart=tstart;
v0.tcount=tcount;
break;
}
}
if(deleted_triangles<=0)break;
deleted_triangles=0;
} //for each iteration
// clean up mesh
compact_mesh();
} //simplify_mesh_lossless()
// Check if a triangle flips when this edge is removed
bool flipped(vec3f p,int i0,int i1,Vertex &v0,Vertex &v1,std::vector<int> &deleted)
{
loopk(0,v0.tcount)
{
Triangle &t=triangles[refs[v0.tstart+k].tid];
if(t.deleted)continue;
int s=refs[v0.tstart+k].tvertex;
int id1=t.v[(s+1)%3];
int id2=t.v[(s+2)%3];
if(id1==i1 || id2==i1) // delete ?
{
deleted[k]=1;
continue;
}
vec3f d1 = vertices[id1].p-p; d1.normalize();
vec3f d2 = vertices[id2].p-p; d2.normalize();
if(fabs(d1.dot(d2))>0.999) return true;
vec3f n;
n.cross(d1,d2);
n.normalize();
deleted[k]=0;
if(n.dot(t.n)<0.2) return true;
}
return false;
}
// update_uvs
void update_uvs(int i0,const Vertex &v,const vec3f &p,std::vector<int> &deleted)
{
loopk(0,v.tcount)
{
Ref &r=refs[v.tstart+k];
Triangle &t=triangles[r.tid];
if(t.deleted)continue;
if(deleted[k])continue;
vec3f p1=vertices[t.v[0]].p;
vec3f p2=vertices[t.v[1]].p;
vec3f p3=vertices[t.v[2]].p;
t.uvs[r.tvertex] = interpolate(p,p1,p2,p3,t.uvs);
}
}
// Update triangle connections and edge error after a edge is collapsed
void update_triangles(int i0,Vertex &v,std::vector<int> &deleted,int &deleted_triangles)
{
vec3f p;
loopk(0,v.tcount)
{
Ref &r=refs[v.tstart+k];
Triangle &t=triangles[r.tid];
if(t.deleted)continue;
if(deleted[k])
{
t.deleted=1;
deleted_triangles++;
continue;
}
t.v[r.tvertex]=i0;
t.dirty=1;
t.err[0]=calculate_error(t.v[0],t.v[1],p);
t.err[1]=calculate_error(t.v[1],t.v[2],p);
t.err[2]=calculate_error(t.v[2],t.v[0],p);
t.err[3]=min(t.err[0],min(t.err[1],t.err[2]));
refs.push_back(r);
}
}
// compact triangles, compute edge error and build reference list
void update_mesh(int iteration)
{
if(iteration>0) // compact triangles
{
int dst=0;
loopi(0,triangles.size())
if(!triangles[i].deleted)
{
triangles[dst++]=triangles[i];
}
triangles.resize(dst);
}
//
// Init Quadrics by Plane & Edge Errors
//
// required at the beginning ( iteration == 0 )
// recomputing during the simplification is not required,
// but mostly improves the result for closed meshes
//
if( iteration == 0 )
{
loopi(0,vertices.size())
vertices[i].q=SymetricMatrix(0.0);
loopi(0,triangles.size())
{
Triangle &t=triangles[i];
vec3f n,p[3];
loopj(0,3) p[j]=vertices[t.v[j]].p;
n.cross(p[1]-p[0],p[2]-p[0]);
n.normalize();
t.n=n;
loopj(0,3) vertices[t.v[j]].q =
vertices[t.v[j]].q+SymetricMatrix(n.x,n.y,n.z,-n.dot(p[0]));
}
loopi(0,triangles.size())
{
// Calc Edge Error
Triangle &t=triangles[i];vec3f p;
loopj(0,3) t.err[j]=calculate_error(t.v[j],t.v[(j+1)%3],p);
t.err[3]=min(t.err[0],min(t.err[1],t.err[2]));
}
}
// Init Reference ID list
loopi(0,vertices.size())
{
vertices[i].tstart=0;
vertices[i].tcount=0;
}
loopi(0,triangles.size())
{
Triangle &t=triangles[i];
loopj(0,3) vertices[t.v[j]].tcount++;
}
int tstart=0;
loopi(0,vertices.size())
{
Vertex &v=vertices[i];
v.tstart=tstart;
tstart+=v.tcount;
v.tcount=0;
}
// Write References
refs.resize(triangles.size()*3);
loopi(0,triangles.size())
{
Triangle &t=triangles[i];
loopj(0,3)
{
Vertex &v=vertices[t.v[j]];
refs[v.tstart+v.tcount].tid=i;
refs[v.tstart+v.tcount].tvertex=j;
v.tcount++;
}
}
// Identify boundary : vertices[].border=0,1
if( iteration == 0 )
{
std::vector<int> vcount,vids;
loopi(0,vertices.size())
vertices[i].border=0;
loopi(0,vertices.size())
{
Vertex &v=vertices[i];
vcount.clear();
vids.clear();
loopj(0,v.tcount)
{
int k=refs[v.tstart+j].tid;
Triangle &t=triangles[k];
loopk(0,3)
{
int ofs=0,id=t.v[k];
while(ofs<vcount.size())
{
if(vids[ofs]==id)break;
ofs++;
}
if(ofs==vcount.size())
{
vcount.push_back(1);
vids.push_back(id);
}
else
vcount[ofs]++;
}
}
loopj(0,vcount.size()) if(vcount[j]==1)
vertices[vids[j]].border=1;
}
}
}
// Finally compact mesh before exiting
void compact_mesh()
{
int dst=0;
loopi(0,vertices.size())
{
vertices[i].tcount=0;
}
loopi(0,triangles.size())
if(!triangles[i].deleted)
{
Triangle &t=triangles[i];
triangles[dst++]=t;
loopj(0,3)vertices[t.v[j]].tcount=1;
}
triangles.resize(dst);
dst=0;
loopi(0,vertices.size())
if(vertices[i].tcount)
{
vertices[i].tstart=dst;
vertices[dst].p=vertices[i].p;
dst++;
}
loopi(0,triangles.size())
{
Triangle &t=triangles[i];
loopj(0,3)t.v[j]=vertices[t.v[j]].tstart;
}
vertices.resize(dst);
}
// Error between vertex and Quadric
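	// Evaluates the quadric form v^T Q v with homogeneous v = (x, y, z, 1),
	// expanded from the 10 stored coefficients of the symmetric 4x4 matrix.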
double vertex_error(SymetricMatrix q, double x, double y, double z)
{
return q[0]*x*x + 2*q[1]*x*y + 2*q[2]*x*z + 2*q[3]*x + q[4]*y*y
+ 2*q[5]*y*z + 2*q[6]*y + q[7]*z*z + 2*q[8]*z + q[9];
}
// Error for one edge
double calculate_error(int id_v1, int id_v2, vec3f &p_result)
{
// compute interpolated vertex
SymetricMatrix q = vertices[id_v1].q + vertices[id_v2].q;
bool border = vertices[id_v1].border & vertices[id_v2].border;
double error=0;
double det = q.det(0, 1, 2, 1, 4, 5, 2, 5, 7);
if ( det != 0 && !border )
{
// q_delta is invertible
p_result.x = -1/det*(q.det(1, 2, 3, 4, 5, 6, 5, 7 , 8)); // vx = A41/det(q_delta)
p_result.y = 1/det*(q.det(0, 2, 3, 1, 5, 6, 2, 7 , 8)); // vy = A42/det(q_delta)
p_result.z = -1/det*(q.det(0, 1, 3, 1, 4, 6, 2, 5, 8)); // vz = A43/det(q_delta)
error = vertex_error(q, p_result.x, p_result.y, p_result.z);
}
else
{
// det = 0 -> try to find best result
vec3f p1=vertices[id_v1].p;
vec3f p2=vertices[id_v2].p;
vec3f p3=(p1+p2)/2;
double error1 = vertex_error(q, p1.x,p1.y,p1.z);
double error2 = vertex_error(q, p2.x,p2.y,p2.z);
double error3 = vertex_error(q, p3.x,p3.y,p3.z);
error = min(error1, min(error2, error3));
if (error1 == error) p_result=p1;
if (error2 == error) p_result=p2;
if (error3 == error) p_result=p3;
}
return error;
}
char *trimwhitespace(char *str)
{
char *end;
// Trim leading space
while(isspace((unsigned char)*str)) str++;
if(*str == 0) // All spaces?
return str;
// Trim trailing space
end = str + strlen(str) - 1;
while(end > str && isspace((unsigned char)*end)) end--;
// Write new null terminator
*(end+1) = 0;
return str;
}
//Option : Load OBJ
void load_obj(const char* filename, bool process_uv=false){
vertices.clear();
triangles.clear();
//printf ( "Loading Objects %s ... \n",filename);
FILE* fn;
if(filename==NULL) return ;
if((char)filename[0]==0) return ;
if ((fn = fopen(filename, "rb")) == NULL)
{
printf ( "File %s not found!\n" ,filename );
return;
}
char line[1000];
memset ( line,0,1000 );
int vertex_cnt = 0;
int material = -1;
std::map<std::string, int> material_map;
std::vector<vec3f> uvs;
std::vector<std::vector<int> > uvMap;
while(fgets( line, 1000, fn ) != NULL)
{
Vertex v;
vec3f uv;
if (strncmp(line, "mtllib", 6) == 0)
{
mtllib = trimwhitespace(&line[7]);
}
if (strncmp(line, "usemtl", 6) == 0)
{
std::string usemtl = trimwhitespace(&line[7]);
if (material_map.find(usemtl) == material_map.end())
{
material_map[usemtl] = materials.size();
materials.push_back(usemtl);
}
material = material_map[usemtl];
}
if ( line[0] == 'v' && line[1] == 't' )
{
if ( line[2] == ' ' )
if(sscanf(line,"vt %lf %lf",
&uv.x,&uv.y)==2)
{
uv.z = 0;
uvs.push_back(uv);
} else
if(sscanf(line,"vt %lf %lf %lf",
&uv.x,&uv.y,&uv.z)==3)
{
uvs.push_back(uv);
}
}
else if ( line[0] == 'v' )
{
if ( line[1] == ' ' )
if(sscanf(line,"v %lf %lf %lf",
&v.p.x, &v.p.y, &v.p.z)==3)
{
vertices.push_back(v);
}
}
int integers[9];
if ( line[0] == 'f' )
{
Triangle t;
bool tri_ok = false;
bool has_uv = false;
if(sscanf(line,"f %d %d %d",
&integers[0],&integers[1],&integers[2])==3)
{
tri_ok = true;
}else
if(sscanf(line,"f %d// %d// %d//",
&integers[0],&integers[1],&integers[2])==3)
{
tri_ok = true;
}else
if(sscanf(line,"f %d//%d %d//%d %d//%d",
&integers[0],&integers[3],
&integers[1],&integers[4],
&integers[2],&integers[5])==6)
{
tri_ok = true;
}else
if(sscanf(line,"f %d/%d/%d %d/%d/%d %d/%d/%d",
&integers[0],&integers[6],&integers[3],
&integers[1],&integers[7],&integers[4],
&integers[2],&integers[8],&integers[5])==9)
{
tri_ok = true;
has_uv = true;
}
else
{
printf("unrecognized sequence\n");
printf("%s\n",line);
while(1);
}
if ( tri_ok )
{
t.v[0] = integers[0]-1-vertex_cnt;
t.v[1] = integers[1]-1-vertex_cnt;
t.v[2] = integers[2]-1-vertex_cnt;
t.attr = 0;
if ( process_uv && has_uv )
{
std::vector<int> indices;
indices.push_back(integers[6]-1-vertex_cnt);
indices.push_back(integers[7]-1-vertex_cnt);
indices.push_back(integers[8]-1-vertex_cnt);
uvMap.push_back(indices);
t.attr |= TEXCOORD;
}
t.material = material;
//geo.triangles.push_back ( tri );
triangles.push_back(t);
//state_before = state;
//state ='f';
}
}
}
if ( process_uv && uvs.size() )
{
loopi(0,triangles.size())
{
loopj(0,3)
triangles[i].uvs[j] = uvs[uvMap[i][j]];
}
}
fclose(fn);
//printf("load_obj: vertices = %lu, triangles = %lu, uvs = %lu\n", vertices.size(), triangles.size(), uvs.size() );
} // load_obj()
// Optional : Store as OBJ
void write_obj(const char* filename)
{
FILE *file=fopen(filename, "w");
int cur_material = -1;
bool has_uv = (triangles.size() && (triangles[0].attr & TEXCOORD) == TEXCOORD);
if (!file)
{
printf("write_obj: can't write data file \"%s\".\n", filename);
exit(0);
}
if (!mtllib.empty())
{
fprintf(file, "mtllib %s\n", mtllib.c_str());
}
loopi(0,vertices.size())
{
//fprintf(file, "v %lf %lf %lf\n", vertices[i].p.x,vertices[i].p.y,vertices[i].p.z);
fprintf(file, "v %g %g %g\n", vertices[i].p.x,vertices[i].p.y,vertices[i].p.z); //more compact: remove trailing zeros
}
if (has_uv)
{
loopi(0,triangles.size()) if(!triangles[i].deleted)
{
fprintf(file, "vt %g %g\n", triangles[i].uvs[0].x, triangles[i].uvs[0].y);
fprintf(file, "vt %g %g\n", triangles[i].uvs[1].x, triangles[i].uvs[1].y);
fprintf(file, "vt %g %g\n", triangles[i].uvs[2].x, triangles[i].uvs[2].y);
}
}
int uv = 1;
loopi(0,triangles.size()) if(!triangles[i].deleted)
{
if (triangles[i].material != cur_material)
{
cur_material = triangles[i].material;
fprintf(file, "usemtl %s\n", materials[triangles[i].material].c_str());
}
if (has_uv)
{
fprintf(file, "f %d/%d %d/%d %d/%d\n", triangles[i].v[0]+1, uv, triangles[i].v[1]+1, uv+1, triangles[i].v[2]+1, uv+2);
uv += 3;
}
else
{
fprintf(file, "f %d %d %d\n", triangles[i].v[0]+1, triangles[i].v[1]+1, triangles[i].v[2]+1);
}
//fprintf(file, "f %d// %d// %d//\n", triangles[i].v[0]+1, triangles[i].v[1]+1, triangles[i].v[2]+1); //more compact: remove trailing zeros
}
fclose(file);
}
};
///////////////////////////////////////////
|
NVlabs/ACID/ACID/src/utils/libmcubes/pyarray_symbol.h |
#define PY_ARRAY_UNIQUE_SYMBOL mcubes_PyArray_API
|
NVlabs/ACID/ACID/src/utils/libmcubes/README.rst | ========
PyMCubes
========
PyMCubes is an implementation of the marching cubes algorithm to extract
isosurfaces from volumetric data. The volumetric data can be given as a
three-dimensional NumPy array or as a Python function ``f(x, y, z)``. The first
option is much faster, but it requires more memory and becomes infeasible for
very large volumes.
PyMCubes also provides a function to export the results of the marching cubes as
COLLADA ``(.dae)`` files. This requires the
`PyCollada <https://github.com/pycollada/pycollada>`_ library.
Installation
============
As with any standard Python package, clone or download the project
and run::
$ cd path/to/PyMCubes
$ python setup.py build
$ python setup.py install
If you do not have write permission on the directory of Python packages,
install with the ``--user`` option::
$ python setup.py install --user
Example
=======
The following example creates a data volume with spherical isosurfaces and
extracts one of them (i.e., a sphere) with PyMCubes. The result is exported as
``sphere.dae``::
>>> import numpy as np
>>> import mcubes
# Create a data volume (30 x 30 x 30)
>>> X, Y, Z = np.mgrid[:30, :30, :30]
>>> u = (X-15)**2 + (Y-15)**2 + (Z-15)**2 - 8**2
# Extract the 0-isosurface
>>> vertices, triangles = mcubes.marching_cubes(u, 0)
# Export the result to sphere.dae
>>> mcubes.export_mesh(vertices, triangles, "sphere.dae", "MySphere")
The second example is very similar to the first one, but it uses a function
to represent the volume instead of a NumPy array::
>>> import numpy as np
>>> import mcubes
# Create the volume
>>> f = lambda x, y, z: x**2 + y**2 + z**2
# Extract the 16-isosurface
>>> vertices, triangles = mcubes.marching_cubes_func((-10,-10,-10), (10,10,10),
... 100, 100, 100, f, 16)
# Export the result to sphere2.dae
>>> mcubes.export_mesh(vertices, triangles, "sphere2.dae", "MySphere")
|
NVlabs/ACID/ACID/src/utils/libmcubes/marchingcubes.h |
#ifndef _MARCHING_CUBES_H
#define _MARCHING_CUBES_H
#include <stddef.h>
#include <vector>
namespace mc
{
extern int edge_table[256];
extern int triangle_table[256][16];
namespace private_
{
double mc_isovalue_interpolation(double isovalue, double f1, double f2,
double x1, double x2);
void mc_add_vertex(double x1, double y1, double z1, double c2,
int axis, double f1, double f2, double isovalue, std::vector<double>* vertices);
}
template<typename coord_type, typename vector3, typename formula>
void marching_cubes(const vector3& lower, const vector3& upper,
int numx, int numy, int numz, formula f, double isovalue,
std::vector<double>& vertices, std::vector<size_t>& polygons)
{
using namespace private_;
// typedef decltype(lower[0]) coord_type;
// numx, numy and numz are the numbers of evaluations in each direction
--numx; --numy; --numz;
coord_type dx = (upper[0] - lower[0])/static_cast<coord_type>(numx);
coord_type dy = (upper[1] - lower[1])/static_cast<coord_type>(numy);
coord_type dz = (upper[2] - lower[2])/static_cast<coord_type>(numz);
size_t* shared_indices = new size_t[2*numy*numz*3];
const int z3 = numz*3;
const int yz3 = numy*z3;
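    // shared_indices caches, for the current and previous x-slab, the indices
    // of vertices created on the three edges owned by each (y, z) cell, so
    // edges shared between neighboring cubes are reused rather than duplicated.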
for(int i=0; i<numx; ++i)
{
coord_type x = lower[0] + dx*i + dx/2;
coord_type x_dx = lower[0] + dx*(i+1) + dx/2;
const int i_mod_2 = i % 2;
const int i_mod_2_inv = (i_mod_2 ? 0 : 1);
for(int j=0; j<numy; ++j)
{
coord_type y = lower[1] + dy*j + dy/2;
coord_type y_dy = lower[1] + dy*(j+1) + dy/2;
for(int k=0; k<numz; ++k)
{
coord_type z = lower[2] + dz*k + dz/2;
coord_type z_dz = lower[2] + dz*(k+1) + dz/2;
double v[8];
v[0] = f(x,y,z); v[1] = f(x_dx,y,z);
v[2] = f(x_dx,y_dy,z); v[3] = f(x, y_dy, z);
v[4] = f(x,y,z_dz); v[5] = f(x_dx,y,z_dz);
v[6] = f(x_dx,y_dy,z_dz); v[7] = f(x, y_dy, z_dz);
unsigned int cubeindex = 0;
for(int m=0; m<8; ++m)
if(v[m] <= isovalue)
cubeindex |= 1<<m;
// Generate vertices AVOIDING DUPLICATES.
int edges = edge_table[cubeindex];
std::vector<size_t> indices(12, -1);
if(edges & 0x040)
{
indices[6] = vertices.size() / 3;
shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 0] = indices[6];
mc_add_vertex(x_dx, y_dy, z_dz, x, 0, v[6], v[7], isovalue, &vertices);
}
if(edges & 0x020)
{
indices[5] = vertices.size() / 3;
shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 1] = indices[5];
mc_add_vertex(x_dx, y, z_dz, y_dy, 1, v[5], v[6], isovalue, &vertices);
}
if(edges & 0x400)
{
indices[10] = vertices.size() / 3;
shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 2] = indices[10];
                    mc_add_vertex(x_dx, y_dy, z, z_dz, 2, v[2], v[6], isovalue, &vertices); // edge 10 runs along z from (x_dx, y_dy, z)
}
if(edges & 0x001)
{
if(j == 0 || k == 0)
{
indices[0] = vertices.size() / 3;
mc_add_vertex(x, y, z, x_dx, 0, v[0], v[1], isovalue, &vertices);
}
else
indices[0] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + (k-1)*3 + 0];
}
if(edges & 0x002)
{
if(k == 0)
{
indices[1] = vertices.size() / 3;
mc_add_vertex(x_dx, y, z, y_dy, 1, v[1], v[2], isovalue, &vertices);
}
else
indices[1] = shared_indices[i_mod_2*yz3 + j*z3 + (k-1)*3 + 1];
}
if(edges & 0x004)
{
if(k == 0)
{
indices[2] = vertices.size() / 3;
mc_add_vertex(x_dx, y_dy, z, x, 0, v[2], v[3], isovalue, &vertices);
}
else
indices[2] = shared_indices[i_mod_2*yz3 + j*z3 + (k-1)*3 + 0];
}
if(edges & 0x008)
{
if(i == 0 || k == 0)
{
indices[3] = vertices.size() / 3;
mc_add_vertex(x, y_dy, z, y, 1, v[3], v[0], isovalue, &vertices);
}
else
indices[3] = shared_indices[i_mod_2_inv*yz3 + j*z3 + (k-1)*3 + 1];
}
if(edges & 0x010)
{
if(j == 0)
{
indices[4] = vertices.size() / 3;
mc_add_vertex(x, y, z_dz, x_dx, 0, v[4], v[5], isovalue, &vertices);
}
else
indices[4] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + k*3 + 0];
}
if(edges & 0x080)
{
if(i == 0)
{
indices[7] = vertices.size() / 3;
mc_add_vertex(x, y_dy, z_dz, y, 1, v[7], v[4], isovalue, &vertices);
}
else
indices[7] = shared_indices[i_mod_2_inv*yz3 + j*z3 + k*3 + 1];
}
if(edges & 0x100)
{
if(i == 0 || j == 0)
{
indices[8] = vertices.size() / 3;
mc_add_vertex(x, y, z, z_dz, 2, v[0], v[4], isovalue, &vertices);
}
else
indices[8] = shared_indices[i_mod_2_inv*yz3 + (j-1)*z3 + k*3 + 2];
}
if(edges & 0x200)
{
if(j == 0)
{
indices[9] = vertices.size() / 3;
mc_add_vertex(x_dx, y, z, z_dz, 2, v[1], v[5], isovalue, &vertices);
}
else
indices[9] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + k*3 + 2];
}
if(edges & 0x800)
{
if(i == 0)
{
indices[11] = vertices.size() / 3;
mc_add_vertex(x, y_dy, z, z_dz, 2, v[3], v[7], isovalue, &vertices);
}
else
indices[11] = shared_indices[i_mod_2_inv*yz3 + j*z3 + k*3 + 2];
}
int tri;
int* triangle_table_ptr = triangle_table[cubeindex];
for(int m=0; tri = triangle_table_ptr[m], tri != -1; ++m)
polygons.push_back(indices[tri]);
}
}
}
delete [] shared_indices;
}
template<typename coord_type, typename vector3, typename formula>
void marching_cubes2(const vector3& lower, const vector3& upper,
int numx, int numy, int numz, formula f, double isovalue,
std::vector<double>& vertices, std::vector<size_t>& polygons)
{
using namespace private_;
// typedef decltype(lower[0]) coord_type;
// numx, numy and numz are the numbers of evaluations in each direction
--numx; --numy; --numz;
coord_type dx = (upper[0] - lower[0])/static_cast<coord_type>(numx);
coord_type dy = (upper[1] - lower[1])/static_cast<coord_type>(numy);
coord_type dz = (upper[2] - lower[2])/static_cast<coord_type>(numz);
size_t* shared_indices = new size_t[2*numy*numz*3];
const int z3 = numz*3;
const int yz3 = numy*z3;
for(int i=0; i<numx; ++i)
{
coord_type x = lower[0] + dx*i;
coord_type x_dx = lower[0] + dx*(i+1);
const int i_mod_2 = i % 2;
const int i_mod_2_inv = (i_mod_2 ? 0 : 1);
for(int j=0; j<numy; ++j)
{
coord_type y = lower[1] + dy*j;
coord_type y_dy = lower[1] + dy*(j+1);
for(int k=0; k<numz; ++k)
{
coord_type z = lower[2] + dz*k;
coord_type z_dz = lower[2] + dz*(k+1);
double v[8];
v[0] = f(x,y,z); v[1] = f(x_dx,y,z);
v[2] = f(x_dx,y_dy,z); v[3] = f(x, y_dy, z);
v[4] = f(x,y,z_dz); v[5] = f(x_dx,y,z_dz);
v[6] = f(x_dx,y_dy,z_dz); v[7] = f(x, y_dy, z_dz);
unsigned int cubeindex = 0;
for(int m=0; m<8; ++m)
if(v[m] <= isovalue)
cubeindex |= 1<<m;
// Generate vertices AVOIDING DUPLICATES.
int edges = edge_table[cubeindex];
std::vector<size_t> indices(12, -1);
if(edges & 0x040)
{
indices[6] = vertices.size() / 3;
shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 0] = indices[6];
mc_add_vertex(x_dx, y_dy, z_dz, x, 0, v[6], v[7], isovalue, &vertices);
}
if(edges & 0x020)
{
indices[5] = vertices.size() / 3;
shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 1] = indices[5];
mc_add_vertex(x_dx, y, z_dz, y_dy, 1, v[5], v[6], isovalue, &vertices);
}
if(edges & 0x400)
{
indices[10] = vertices.size() / 3;
shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 2] = indices[10];
                    mc_add_vertex(x_dx, y_dy, z, z_dz, 2, v[2], v[6], isovalue, &vertices); // edge 10 runs along z from (x_dx, y_dy, z)
}
if(edges & 0x001)
{
if(j == 0 || k == 0)
{
indices[0] = vertices.size() / 3;
mc_add_vertex(x, y, z, x_dx, 0, v[0], v[1], isovalue, &vertices);
}
else
indices[0] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + (k-1)*3 + 0];
}
if(edges & 0x002)
{
if(k == 0)
{
indices[1] = vertices.size() / 3;
mc_add_vertex(x_dx, y, z, y_dy, 1, v[1], v[2], isovalue, &vertices);
}
else
indices[1] = shared_indices[i_mod_2*yz3 + j*z3 + (k-1)*3 + 1];
}
if(edges & 0x004)
{
if(k == 0)
{
indices[2] = vertices.size() / 3;
mc_add_vertex(x_dx, y_dy, z, x, 0, v[2], v[3], isovalue, &vertices);
}
else
indices[2] = shared_indices[i_mod_2*yz3 + j*z3 + (k-1)*3 + 0];
}
if(edges & 0x008)
{
if(i == 0 || k == 0)
{
indices[3] = vertices.size() / 3;
mc_add_vertex(x, y_dy, z, y, 1, v[3], v[0], isovalue, &vertices);
}
else
indices[3] = shared_indices[i_mod_2_inv*yz3 + j*z3 + (k-1)*3 + 1];
}
if(edges & 0x010)
{
if(j == 0)
{
indices[4] = vertices.size() / 3;
mc_add_vertex(x, y, z_dz, x_dx, 0, v[4], v[5], isovalue, &vertices);
}
else
indices[4] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + k*3 + 0];
}
if(edges & 0x080)
{
if(i == 0)
{
indices[7] = vertices.size() / 3;
mc_add_vertex(x, y_dy, z_dz, y, 1, v[7], v[4], isovalue, &vertices);
}
else
indices[7] = shared_indices[i_mod_2_inv*yz3 + j*z3 + k*3 + 1];
}
if(edges & 0x100)
{
if(i == 0 || j == 0)
{
indices[8] = vertices.size() / 3;
mc_add_vertex(x, y, z, z_dz, 2, v[0], v[4], isovalue, &vertices);
}
else
indices[8] = shared_indices[i_mod_2_inv*yz3 + (j-1)*z3 + k*3 + 2];
}
if(edges & 0x200)
{
if(j == 0)
{
indices[9] = vertices.size() / 3;
mc_add_vertex(x_dx, y, z, z_dz, 2, v[1], v[5], isovalue, &vertices);
}
else
indices[9] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + k*3 + 2];
}
if(edges & 0x800)
{
if(i == 0)
{
indices[11] = vertices.size() / 3;
mc_add_vertex(x, y_dy, z, z_dz, 2, v[3], v[7], isovalue, &vertices);
}
else
indices[11] = shared_indices[i_mod_2_inv*yz3 + j*z3 + k*3 + 2];
}
int tri;
int* triangle_table_ptr = triangle_table[cubeindex];
for(int m=0; tri = triangle_table_ptr[m], tri != -1; ++m)
polygons.push_back(indices[tri]);
}
}
}
delete [] shared_indices;
}
template<typename coord_type, typename vector3, typename formula>
void marching_cubes3(const vector3& lower, const vector3& upper,
int numx, int numy, int numz, formula f, double isovalue,
std::vector<double>& vertices, std::vector<size_t>& polygons)
{
using namespace private_;
// typedef decltype(lower[0]) coord_type;
// numx, numy and numz are the numbers of evaluations in each direction
--numx; --numy; --numz;
coord_type dx = (upper[0] - lower[0])/static_cast<coord_type>(numx);
coord_type dy = (upper[1] - lower[1])/static_cast<coord_type>(numy);
coord_type dz = (upper[2] - lower[2])/static_cast<coord_type>(numz);
size_t* shared_indices = new size_t[2*numy*numz*3];
const int z3 = numz*3;
const int yz3 = numy*z3;
for(int i=0; i<numx; ++i)
{
coord_type x = lower[0] + dx*i - dx/2;
coord_type x_dx = lower[0] + dx*(i+1) - dx/2;
const int i_mod_2 = i % 2;
const int i_mod_2_inv = (i_mod_2 ? 0 : 1);
for(int j=0; j<numy; ++j)
{
coord_type y = lower[1] + dy*j - dy/2;
coord_type y_dy = lower[1] + dy*(j+1) - dy/2;
for(int k=0; k<numz; ++k)
{
coord_type z = lower[2] + dz*k - dz/2;
coord_type z_dz = lower[2] + dz*(k+1) - dz/2;
double v[8];
v[0] = f(x,y,z); v[1] = f(x_dx,y,z);
v[2] = f(x_dx,y_dy,z); v[3] = f(x, y_dy, z);
v[4] = f(x,y,z_dz); v[5] = f(x_dx,y,z_dz);
v[6] = f(x_dx,y_dy,z_dz); v[7] = f(x, y_dy, z_dz);
unsigned int cubeindex = 0;
for(int m=0; m<8; ++m)
if(v[m] <= isovalue)
cubeindex |= 1<<m;
// Generate vertices AVOIDING DUPLICATES.
int edges = edge_table[cubeindex];
std::vector<size_t> indices(12, -1);
if(edges & 0x040)
{
indices[6] = vertices.size() / 3;
shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 0] = indices[6];
mc_add_vertex(x_dx, y_dy, z_dz, x, 0, v[6], v[7], isovalue, &vertices);
}
if(edges & 0x020)
{
indices[5] = vertices.size() / 3;
shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 1] = indices[5];
mc_add_vertex(x_dx, y, z_dz, y_dy, 1, v[5], v[6], isovalue, &vertices);
}
if(edges & 0x400)
{
indices[10] = vertices.size() / 3;
shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 2] = indices[10];
                    mc_add_vertex(x_dx, y_dy, z, z_dz, 2, v[2], v[6], isovalue, &vertices); // edge 10 runs along z from (x_dx, y_dy, z)
}
if(edges & 0x001)
{
if(j == 0 || k == 0)
{
indices[0] = vertices.size() / 3;
mc_add_vertex(x, y, z, x_dx, 0, v[0], v[1], isovalue, &vertices);
}
else
indices[0] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + (k-1)*3 + 0];
}
if(edges & 0x002)
{
if(k == 0)
{
indices[1] = vertices.size() / 3;
mc_add_vertex(x_dx, y, z, y_dy, 1, v[1], v[2], isovalue, &vertices);
}
else
indices[1] = shared_indices[i_mod_2*yz3 + j*z3 + (k-1)*3 + 1];
}
if(edges & 0x004)
{
if(k == 0)
{
indices[2] = vertices.size() / 3;
mc_add_vertex(x_dx, y_dy, z, x, 0, v[2], v[3], isovalue, &vertices);
}
else
indices[2] = shared_indices[i_mod_2*yz3 + j*z3 + (k-1)*3 + 0];
}
if(edges & 0x008)
{
if(i == 0 || k == 0)
{
indices[3] = vertices.size() / 3;
mc_add_vertex(x, y_dy, z, y, 1, v[3], v[0], isovalue, &vertices);
}
else
indices[3] = shared_indices[i_mod_2_inv*yz3 + j*z3 + (k-1)*3 + 1];
}
if(edges & 0x010)
{
if(j == 0)
{
indices[4] = vertices.size() / 3;
mc_add_vertex(x, y, z_dz, x_dx, 0, v[4], v[5], isovalue, &vertices);
}
else
indices[4] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + k*3 + 0];
}
if(edges & 0x080)
{
if(i == 0)
{
indices[7] = vertices.size() / 3;
mc_add_vertex(x, y_dy, z_dz, y, 1, v[7], v[4], isovalue, &vertices);
}
else
indices[7] = shared_indices[i_mod_2_inv*yz3 + j*z3 + k*3 + 1];
}
if(edges & 0x100)
{
if(i == 0 || j == 0)
{
indices[8] = vertices.size() / 3;
mc_add_vertex(x, y, z, z_dz, 2, v[0], v[4], isovalue, &vertices);
}
else
indices[8] = shared_indices[i_mod_2_inv*yz3 + (j-1)*z3 + k*3 + 2];
}
if(edges & 0x200)
{
if(j == 0)
{
indices[9] = vertices.size() / 3;
mc_add_vertex(x_dx, y, z, z_dz, 2, v[1], v[5], isovalue, &vertices);
}
else
indices[9] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + k*3 + 2];
}
if(edges & 0x800)
{
if(i == 0)
{
indices[11] = vertices.size() / 3;
mc_add_vertex(x, y_dy, z, z_dz, 2, v[3], v[7], isovalue, &vertices);
}
else
indices[11] = shared_indices[i_mod_2_inv*yz3 + j*z3 + k*3 + 2];
}
int tri;
int* triangle_table_ptr = triangle_table[cubeindex];
for(int m=0; tri = triangle_table_ptr[m], tri != -1; ++m)
polygons.push_back(indices[tri]);
}
}
}
delete [] shared_indices;
}
}
#endif // _MARCHING_CUBES_H
|
NVlabs/ACID/ACID/src/utils/libmcubes/pyarraymodule.h |
#ifndef _EXTMODULE_H
#define _EXTMODULE_H
#include <Python.h>
#include <stdexcept>
// #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#define PY_ARRAY_UNIQUE_SYMBOL mcubes_PyArray_API
#define NO_IMPORT_ARRAY
#include "numpy/arrayobject.h"
#include <complex>
template<class T>
struct numpy_typemap;
#define define_numpy_type(ctype, dtype) \
template<> \
struct numpy_typemap<ctype> \
{static const int type = dtype;};
define_numpy_type(bool, NPY_BOOL);
define_numpy_type(char, NPY_BYTE);
define_numpy_type(short, NPY_SHORT);
define_numpy_type(int, NPY_INT);
define_numpy_type(long, NPY_LONG);
define_numpy_type(long long, NPY_LONGLONG);
define_numpy_type(unsigned char, NPY_UBYTE);
define_numpy_type(unsigned short, NPY_USHORT);
define_numpy_type(unsigned int, NPY_UINT);
define_numpy_type(unsigned long, NPY_ULONG);
define_numpy_type(unsigned long long, NPY_ULONGLONG);
define_numpy_type(float, NPY_FLOAT);
define_numpy_type(double, NPY_DOUBLE);
define_numpy_type(long double, NPY_LONGDOUBLE);
define_numpy_type(std::complex<float>, NPY_CFLOAT);
define_numpy_type(std::complex<double>, NPY_CDOUBLE);
define_numpy_type(std::complex<long double>, NPY_CLONGDOUBLE);
template<typename T>
T PyArray_SafeGet(const PyArrayObject* aobj, const npy_intp* indaux)
{
// HORROR.
npy_intp* ind = const_cast<npy_intp*>(indaux);
void* ptr = PyArray_GetPtr(const_cast<PyArrayObject*>(aobj), ind);
switch(PyArray_TYPE(aobj))
{
case NPY_BOOL:
return static_cast<T>(*reinterpret_cast<bool*>(ptr));
case NPY_BYTE:
return static_cast<T>(*reinterpret_cast<char*>(ptr));
case NPY_SHORT:
return static_cast<T>(*reinterpret_cast<short*>(ptr));
case NPY_INT:
return static_cast<T>(*reinterpret_cast<int*>(ptr));
case NPY_LONG:
return static_cast<T>(*reinterpret_cast<long*>(ptr));
case NPY_LONGLONG:
return static_cast<T>(*reinterpret_cast<long long*>(ptr));
case NPY_UBYTE:
return static_cast<T>(*reinterpret_cast<unsigned char*>(ptr));
case NPY_USHORT:
return static_cast<T>(*reinterpret_cast<unsigned short*>(ptr));
case NPY_UINT:
return static_cast<T>(*reinterpret_cast<unsigned int*>(ptr));
case NPY_ULONG:
return static_cast<T>(*reinterpret_cast<unsigned long*>(ptr));
case NPY_ULONGLONG:
return static_cast<T>(*reinterpret_cast<unsigned long long*>(ptr));
case NPY_FLOAT:
return static_cast<T>(*reinterpret_cast<float*>(ptr));
case NPY_DOUBLE:
return static_cast<T>(*reinterpret_cast<double*>(ptr));
case NPY_LONGDOUBLE:
return static_cast<T>(*reinterpret_cast<long double*>(ptr));
default:
throw std::runtime_error("data type not supported");
}
}
template<typename T>
void PyArray_SafeSet(PyArrayObject* aobj, const npy_intp* indaux, const T& value)
{
// HORROR.
npy_intp* ind = const_cast<npy_intp*>(indaux);
void* ptr = PyArray_GetPtr(aobj, ind);
switch(PyArray_TYPE(aobj))
{
case NPY_BOOL:
*reinterpret_cast<bool*>(ptr) = static_cast<bool>(value);
break;
case NPY_BYTE:
*reinterpret_cast<char*>(ptr) = static_cast<char>(value);
break;
case NPY_SHORT:
*reinterpret_cast<short*>(ptr) = static_cast<short>(value);
break;
case NPY_INT:
*reinterpret_cast<int*>(ptr) = static_cast<int>(value);
break;
case NPY_LONG:
*reinterpret_cast<long*>(ptr) = static_cast<long>(value);
break;
case NPY_LONGLONG:
*reinterpret_cast<long long*>(ptr) = static_cast<long long>(value);
break;
case NPY_UBYTE:
*reinterpret_cast<unsigned char*>(ptr) = static_cast<unsigned char>(value);
break;
case NPY_USHORT:
*reinterpret_cast<unsigned short*>(ptr) = static_cast<unsigned short>(value);
break;
case NPY_UINT:
*reinterpret_cast<unsigned int*>(ptr) = static_cast<unsigned int>(value);
break;
case NPY_ULONG:
*reinterpret_cast<unsigned long*>(ptr) = static_cast<unsigned long>(value);
break;
case NPY_ULONGLONG:
*reinterpret_cast<unsigned long long*>(ptr) = static_cast<unsigned long long>(value);
break;
case NPY_FLOAT:
*reinterpret_cast<float*>(ptr) = static_cast<float>(value);
break;
case NPY_DOUBLE:
*reinterpret_cast<double*>(ptr) = static_cast<double>(value);
break;
case NPY_LONGDOUBLE:
*reinterpret_cast<long double*>(ptr) = static_cast<long double>(value);
break;
default:
throw std::runtime_error("data type not supported");
}
}
#endif
|
NVlabs/ACID/ACID/src/utils/libmcubes/__init__.py | from src.utils.libmcubes.mcubes import (
marching_cubes, marching_cubes_func
)
from src.utils.libmcubes.exporter import (
export_mesh, export_obj, export_off
)
__all__ = [
    'marching_cubes', 'marching_cubes_func',
    'export_mesh', 'export_obj', 'export_off'
]
|
NVlabs/ACID/ACID/src/utils/libmcubes/exporter.py |
import numpy as np
def export_obj(vertices, triangles, filename):
"""
Exports a mesh in the (.obj) format.
"""
with open(filename, 'w') as fh:
for v in vertices:
fh.write("v {} {} {}\n".format(*v))
for f in triangles:
fh.write("f {} {} {}\n".format(*(f + 1)))
def export_off(vertices, triangles, filename):
"""
Exports a mesh in the (.off) format.
"""
with open(filename, 'w') as fh:
fh.write('OFF\n')
fh.write('{} {} 0\n'.format(len(vertices), len(triangles)))
for v in vertices:
fh.write("{} {} {}\n".format(*v))
for f in triangles:
fh.write("3 {} {} {}\n".format(*f))
def export_mesh(vertices, triangles, filename, mesh_name="mcubes_mesh"):
"""
Exports a mesh in the COLLADA (.dae) format.
Needs PyCollada (https://github.com/pycollada/pycollada).
"""
import collada
mesh = collada.Collada()
vert_src = collada.source.FloatSource("verts-array", vertices, ('X','Y','Z'))
geom = collada.geometry.Geometry(mesh, "geometry0", mesh_name, [vert_src])
input_list = collada.source.InputList()
input_list.addInput(0, 'VERTEX', "#verts-array")
triset = geom.createTriangleSet(np.copy(triangles), input_list, "")
geom.primitives.append(triset)
mesh.geometries.append(geom)
geomnode = collada.scene.GeometryNode(geom, [])
node = collada.scene.Node(mesh_name, children=[geomnode])
myscene = collada.scene.Scene("mcubes_scene", [node])
mesh.scenes.append(myscene)
mesh.scene = myscene
mesh.write(filename)
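# Minimal usage sketch (hypothetical arrays): a single triangle.
#   vertices = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
#   triangles = np.array([[0, 1, 2]])
#   export_obj(vertices, triangles, "tri.obj")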
|
NVlabs/ACID/ACID/src/utils/libmcubes/marchingcubes.cpp |
#include "marchingcubes.h"
namespace mc
{
int edge_table[256] =
{
0x000, 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c, 0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x099, 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c, 0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x033, 0x13a, 0x636, 0x73f, 0x435, 0x53c, 0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0x0aa, 0x7a6, 0x6af, 0x5a5, 0x4ac, 0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x066, 0x16f, 0x265, 0x36c, 0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0x0ff, 0x3f5, 0x2fc, 0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x055, 0x15c, 0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0x0cc, 0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc, 0x0cc, 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c, 0x15c, 0x055, 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc, 0x2fc, 0x3f5, 0x0ff, 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c, 0x36c, 0x265, 0x16f, 0x066, 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac, 0x4ac, 0x5a5, 0x6af, 0x7a6, 0x0aa, 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c, 0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x033, 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c, 0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x099, 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c, 0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x000
};
int triangle_table[256][16] =
{
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1},
{3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1},
{4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1},
{9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1},
{10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1},
{5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1},
{8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1},
{2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1},
{11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1},
{5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1},
{11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1},
{11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1},
{9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1},
{6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1},
{6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1},
{8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1},
{7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1},
{3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1},
{9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1},
{8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1},
{0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1},
{6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1},
{10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1},
{10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1},
{0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1},
{3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1},
{9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1},
{8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1},
{3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1},
{10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1},
{10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1},
{7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1},
{1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1},
{11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1},
{8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1},
{0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1},
{7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1},
{7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1},
{10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1},
{0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1},
{7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1},
{9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1},
{6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1},
{4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1},
{10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1},
{8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1},
{1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1},
{10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1},
{10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1},
{9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1},
{7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1},
{3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1},
{7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1},
{3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1},
{6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1},
{9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1},
{1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1},
{4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1},
{7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1},
{6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1},
{0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1},
{6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1},
{0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1},
{11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1},
{6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1},
{5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1},
{9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1},
{1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1},
{10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1},
{0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1},
{5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1},
{11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1},
{9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1},
{7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1},
{2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1},
{9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1},
{1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1},
{10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1},
{2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1},
{0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1},
{0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1},
{9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1},
{5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1},
{5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1},
{9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1},
{1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1},
{3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1},
{4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1},
{9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1},
{11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1},
{2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1},
{9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1},
{3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1},
{1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1},
{4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1},
{0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1},
{1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}
};
namespace private_
{
// Linearly interpolate the isosurface crossing between two samples
// (field value f1 at coordinate x1, f2 at x2); falls back to the midpoint
// when the field is flat between them.
double mc_isovalue_interpolation(double isovalue, double f1, double f2,
    double x1, double x2)
{
if(f2==f1)
return (x2+x1)/2;
return (x2-x1)*(isovalue-f1)/(f2-f1) + x1;
}
// Emit one vertex interpolated along the cube edge that runs parallel to
// `axis` (0 = x, 1 = y, 2 = z); the other two coordinates stay fixed.
void mc_add_vertex(double x1, double y1, double z1, double c2,
    int axis, double f1, double f2, double isovalue, std::vector<double>* vertices)
{
if(axis == 0)
{
double x = mc_isovalue_interpolation(isovalue, f1, f2, x1, c2);
vertices->push_back(x);
vertices->push_back(y1);
vertices->push_back(z1);
return;
}
if(axis == 1)
{
double y = mc_isovalue_interpolation(isovalue, f1, f2, y1, c2);
vertices->push_back(x1);
vertices->push_back(y);
vertices->push_back(z1);
return;
}
if(axis == 2)
{
double z = mc_isovalue_interpolation(isovalue, f1, f2, z1, c2);
vertices->push_back(x1);
vertices->push_back(y1);
vertices->push_back(z);
return;
}
}
}
}
|
NVlabs/ACID/ACID/src/utils/libmcubes/pywrapper.cpp |
#include "pywrapper.h"
#include "marchingcubes.h"
#include <stdexcept>
struct PythonToCFunc
{
PyObject* func;
PythonToCFunc(PyObject* func) {this->func = func;}
double operator()(double x, double y, double z)
{
        PyObject* res = PyObject_CallFunction(func, "(d,d,d)", x, y, z);
        if(res == NULL)
            return 0.0; // swallow the Python exception and treat the sample as empty
double result = PyFloat_AsDouble(res);
Py_DECREF(res);
return result;
}
};
PyObject* marching_cubes_func(PyObject* lower, PyObject* upper,
int numx, int numy, int numz, PyObject* f, double isovalue)
{
std::vector<double> vertices;
std::vector<size_t> polygons;
// Copy the lower and upper coordinates to a C array.
double lower_[3];
double upper_[3];
for(int i=0; i<3; ++i)
{
PyObject* l = PySequence_GetItem(lower, i);
if(l == NULL)
            throw std::runtime_error("marching_cubes_func: cannot read lower bound");
PyObject* u = PySequence_GetItem(upper, i);
if(u == NULL)
{
Py_DECREF(l);
            throw std::runtime_error("marching_cubes_func: cannot read upper bound");
}
lower_[i] = PyFloat_AsDouble(l);
upper_[i] = PyFloat_AsDouble(u);
Py_DECREF(l);
Py_DECREF(u);
        // PyFloat_AsDouble returns -1.0 on failure; it only signals an error
        // if a Python exception is actually pending.
        if(lower_[i]==-1.0 || upper_[i]==-1.0)
        {
            if(PyErr_Occurred())
                throw std::runtime_error("marching_cubes_func: bounds must be numeric");
        }
}
// Marching cubes.
mc::marching_cubes<double>(lower_, upper_, numx, numy, numz, PythonToCFunc(f), isovalue, vertices, polygons);
// Copy the result to two Python ndarrays.
npy_intp size_vertices = vertices.size();
npy_intp size_polygons = polygons.size();
    // PyArray_DOUBLE / PyArray_ULONG are the legacy (pre-NumPy-1.7) names
    // for NPY_DOUBLE / NPY_ULONG.
    PyArrayObject* verticesarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_vertices, PyArray_DOUBLE));
    PyArrayObject* polygonsarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_polygons, PyArray_ULONG));
std::vector<double>::const_iterator it = vertices.begin();
for(int i=0; it!=vertices.end(); ++i, ++it)
*reinterpret_cast<double*>(PyArray_GETPTR1(verticesarr, i)) = *it;
std::vector<size_t>::const_iterator it2 = polygons.begin();
for(int i=0; it2!=polygons.end(); ++i, ++it2)
*reinterpret_cast<unsigned long*>(PyArray_GETPTR1(polygonsarr, i)) = *it2;
PyObject* res = Py_BuildValue("(O,O)", verticesarr, polygonsarr);
Py_XDECREF(verticesarr);
Py_XDECREF(polygonsarr);
return res;
}
struct PyArrayToCFunc
{
PyArrayObject* arr;
PyArrayToCFunc(PyArrayObject* arr) {this->arr = arr;}
double operator()(int x, int y, int z)
{
npy_intp c[3] = {x,y,z};
return PyArray_SafeGet<double>(arr, c);
}
};
PyObject* marching_cubes(PyArrayObject* arr, double isovalue)
{
if(PyArray_NDIM(arr) != 3)
throw std::runtime_error("Only three-dimensional arrays are supported.");
// Prepare data.
npy_intp* shape = PyArray_DIMS(arr);
double lower[3] = {0,0,0};
double upper[3] = {shape[0]-1, shape[1]-1, shape[2]-1};
long numx = upper[0] - lower[0] + 1;
long numy = upper[1] - lower[1] + 1;
long numz = upper[2] - lower[2] + 1;
std::vector<double> vertices;
std::vector<size_t> polygons;
// Marching cubes.
mc::marching_cubes<double>(lower, upper, numx, numy, numz, PyArrayToCFunc(arr), isovalue,
vertices, polygons);
// Copy the result to two Python ndarrays.
npy_intp size_vertices = vertices.size();
npy_intp size_polygons = polygons.size();
PyArrayObject* verticesarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_vertices, PyArray_DOUBLE));
PyArrayObject* polygonsarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_polygons, PyArray_ULONG));
std::vector<double>::const_iterator it = vertices.begin();
for(int i=0; it!=vertices.end(); ++i, ++it)
*reinterpret_cast<double*>(PyArray_GETPTR1(verticesarr, i)) = *it;
std::vector<size_t>::const_iterator it2 = polygons.begin();
for(int i=0; it2!=polygons.end(); ++i, ++it2)
*reinterpret_cast<unsigned long*>(PyArray_GETPTR1(polygonsarr, i)) = *it2;
PyObject* res = Py_BuildValue("(O,O)", verticesarr, polygonsarr);
Py_XDECREF(verticesarr);
Py_XDECREF(polygonsarr);
return res;
}
PyObject* marching_cubes2(PyArrayObject* arr, double isovalue)
{
if(PyArray_NDIM(arr) != 3)
throw std::runtime_error("Only three-dimensional arrays are supported.");
// Prepare data.
npy_intp* shape = PyArray_DIMS(arr);
double lower[3] = {0,0,0};
double upper[3] = {shape[0]-1, shape[1]-1, shape[2]-1};
long numx = upper[0] - lower[0] + 1;
long numy = upper[1] - lower[1] + 1;
long numz = upper[2] - lower[2] + 1;
std::vector<double> vertices;
std::vector<size_t> polygons;
// Marching cubes.
mc::marching_cubes2<double>(lower, upper, numx, numy, numz, PyArrayToCFunc(arr), isovalue,
vertices, polygons);
// Copy the result to two Python ndarrays.
npy_intp size_vertices = vertices.size();
npy_intp size_polygons = polygons.size();
PyArrayObject* verticesarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_vertices, PyArray_DOUBLE));
PyArrayObject* polygonsarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_polygons, PyArray_ULONG));
std::vector<double>::const_iterator it = vertices.begin();
for(int i=0; it!=vertices.end(); ++i, ++it)
*reinterpret_cast<double*>(PyArray_GETPTR1(verticesarr, i)) = *it;
std::vector<size_t>::const_iterator it2 = polygons.begin();
for(int i=0; it2!=polygons.end(); ++i, ++it2)
*reinterpret_cast<unsigned long*>(PyArray_GETPTR1(polygonsarr, i)) = *it2;
PyObject* res = Py_BuildValue("(O,O)", verticesarr, polygonsarr);
Py_XDECREF(verticesarr);
Py_XDECREF(polygonsarr);
return res;
}
PyObject* marching_cubes3(PyArrayObject* arr, double isovalue)
{
if(PyArray_NDIM(arr) != 3)
throw std::runtime_error("Only three-dimensional arrays are supported.");
// Prepare data.
npy_intp* shape = PyArray_DIMS(arr);
double lower[3] = {0,0,0};
double upper[3] = {shape[0]-1, shape[1]-1, shape[2]-1};
long numx = upper[0] - lower[0] + 1;
long numy = upper[1] - lower[1] + 1;
long numz = upper[2] - lower[2] + 1;
std::vector<double> vertices;
std::vector<size_t> polygons;
// Marching cubes.
mc::marching_cubes3<double>(lower, upper, numx, numy, numz, PyArrayToCFunc(arr), isovalue,
vertices, polygons);
// Copy the result to two Python ndarrays.
npy_intp size_vertices = vertices.size();
npy_intp size_polygons = polygons.size();
PyArrayObject* verticesarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_vertices, PyArray_DOUBLE));
PyArrayObject* polygonsarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_polygons, PyArray_ULONG));
std::vector<double>::const_iterator it = vertices.begin();
for(int i=0; it!=vertices.end(); ++i, ++it)
*reinterpret_cast<double*>(PyArray_GETPTR1(verticesarr, i)) = *it;
std::vector<size_t>::const_iterator it2 = polygons.begin();
for(int i=0; it2!=polygons.end(); ++i, ++it2)
*reinterpret_cast<unsigned long*>(PyArray_GETPTR1(polygonsarr, i)) = *it2;
PyObject* res = Py_BuildValue("(O,O)", verticesarr, polygonsarr);
Py_XDECREF(verticesarr);
Py_XDECREF(polygonsarr);
return res;
} |
NVlabs/ACID/ACID/src/utils/libmcubes/pywrapper.h |
#ifndef _PYWRAPPER_H
#define _PYWRAPPER_H
#include <Python.h>
#include "pyarraymodule.h"
#include <vector>
PyObject* marching_cubes(PyArrayObject* arr, double isovalue);
PyObject* marching_cubes2(PyArrayObject* arr, double isovalue);
PyObject* marching_cubes3(PyArrayObject* arr, double isovalue);
PyObject* marching_cubes_func(PyObject* lower, PyObject* upper,
int numx, int numy, int numz, PyObject* f, double isovalue);
#endif // _PYWRAPPER_H
|
NVlabs/ACID/ACID/src/data/__init__.py |
from src.data.core import (
PlushEnvGeom, collate_remove_none, worker_init_fn, get_plush_loader
)
from src.data.transforms import (
PointcloudNoise, SubsamplePointcloud,
SubsamplePoints,
)
__all__ = [
    # Core
    'PlushEnvGeom',
    'get_plush_loader',
    'collate_remove_none',
    'worker_init_fn',
    'PointcloudNoise',
    'SubsamplePointcloud',
    'SubsamplePoints',
]
|
NVlabs/ACID/ACID/src/data/core.py | import os
import yaml
import pickle
import torch
import logging
import numpy as np
from torch.utils import data
from torch.utils.data.dataloader import default_collate
from src.utils import plushsim_util, common_util
scene_range = plushsim_util.SCENE_RANGE.copy()
to_range = np.array([[-1.1,-1.1,-1.1],[1.1,1.1,1.1]]) * 0.5
logger = logging.getLogger(__name__)
def collate_remove_none(batch):
''' Collater that puts each data field into a tensor with outer dimension
batch size.
Args:
batch: batch
'''
batch = list(filter(lambda x: x is not None, batch))
return data.dataloader.default_collate(batch)
def worker_init_fn(worker_id):
''' Worker init function to ensure true randomness.
'''
def set_num_threads(nt):
try:
import mkl; mkl.set_num_threads(nt)
        except ImportError:
            pass
torch.set_num_threads(1)
os.environ['IPC_ENABLE']='1'
for o in ['OPENBLAS_NUM_THREADS','NUMEXPR_NUM_THREADS','OMP_NUM_THREADS','MKL_NUM_THREADS']:
os.environ[o] = str(nt)
random_data = os.urandom(4)
base_seed = int.from_bytes(random_data, byteorder="big")
np.random.seed(base_seed + worker_id)
def collate_pair_fn(batch):
num_points = batch[0]['sampled_pts'].shape[1]
collated = {}
for key in batch[0]:
if key == 'geo_dists':
collated[key] = torch.as_tensor(np.concatenate([d[key] for d in batch]))
elif key == 'num_pairs':
indices = []
for i,d in enumerate(batch):
indices.append(np.arange(d['num_pairs']) + i * num_points)
collated["pair_indices"] = torch.as_tensor(np.concatenate(indices))
else:
collated[key] = default_collate([d[key] for d in batch])
return collated
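# A minimal sketch (hypothetical helper, not used anywhere in training) of
# what collate_pair_fn produces for "num_pairs": with num_points = 4 and two
# samples holding 2 and 3 pairs, "pair_indices" becomes [0, 1, 4, 5, 6],
# i.e. each sample's local pair indices offset by i * num_points.
def _demo_collate_pair_fn():
    batch = [
        {'sampled_pts': np.zeros((2, 4, 3), dtype=np.float32), 'num_pairs': 2, 'geo_dists': np.zeros(2)},
        {'sampled_pts': np.zeros((2, 4, 3), dtype=np.float32), 'num_pairs': 3, 'geo_dists': np.zeros(3)},
    ]
    return collate_pair_fn(batch)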
class PlushEnvBoth(data.Dataset):
def __init__(self, flow_root, pair_root, num_points,
split="train", transform={}, pos_ratio=2):
# Attributes
self.flow_root = flow_root
self.num_points = num_points
self.split = split
if split != "train":
self.num_points = -1
self.pair_root = pair_root
self.transform = transform
self.pos_ratio = pos_ratio
if split == 'train':
with open(os.path.join(flow_root, 'train.pkl'), 'rb') as fp:
self.models = pickle.load(fp)
else:
with open(os.path.join(flow_root, 'test.pkl'), 'rb') as fp:
self.models = pickle.load(fp)
def __len__(self):
''' Returns the length of the dataset.
'''
return len(self.models)
def __getitem__(self, idx):
''' Returns an item of the dataset.
Args:
idx (int): ID of data point
'''
data = {}
split_id, model_id, reset_id, int_id = self.models[idx]
# load frame and get partial observation
points_dict = np.load(
plushsim_util.get_flow_data_file(
self.flow_root,split_id, model_id, reset_id, int_id))
obj_pcloud, env_pcloud = self._prepare_partial_obs(points_dict)
# load pair frame info
pair_info = np.load(
plushsim_util.get_flow_pair_data_file(
self.pair_root,split_id, model_id, reset_id, int_id))
pair_reset_id, pair_int_id = self._get_pair_id(pair_info)
# load pair frame and get partial observation
points_dict2 = np.load(
plushsim_util.get_flow_data_file(
self.flow_root,split_id, model_id, pair_reset_id, pair_int_id))
obj_pcloud2, env_pcloud2 = self._prepare_partial_obs(points_dict2)
if self.split == 'train':
# if training, load random points
# implicit network sampled points
pts, occs, sampled_pts, sampled_occ, sampled_flow, sampled_inds = self._prepare_points(
points_dict)
# get which occupied points are sampled (index is in the occupied subset)
occed = occs != 0
num_occed = occed.sum()
total_to_occs = np.zeros(pts.shape[0], dtype=np.uint32)
total_to_occs[occed] = np.arange(num_occed)
sampled_occs_ids = total_to_occs[sampled_inds[sampled_occ == 1.]]
# basically sampled_positive ids is used to index the pairs in pair info npz
            # reorganize so that occupied samples come first; compute the mask
            # once, since sampled_occ itself is reordered below and reusing it
            # as the mask would corrupt the flow reordering
            occ_mask = sampled_occ == 1.
            sampled_pts = np.concatenate([sampled_pts[occ_mask], sampled_pts[~occ_mask]])
            sampled_flow = np.concatenate([sampled_flow[occ_mask], sampled_flow[~occ_mask]])
            sampled_occ = np.concatenate([sampled_occ[occ_mask], sampled_occ[~occ_mask]])
geo_dists, tgtids = self._prepare_pair_data(pair_info, sampled_occs_ids)
_,_, sampled_pts2, sampled_occ2, sampled_flow2, _ = self._prepare_points(points_dict2, chosen=tgtids)
else:
# if not training, load matched points
sampled_pts, sampled_pts2, \
sampled_occ, sampled_occ2, \
sampled_flow, sampled_flow2, geo_dists = self._prepare_matched_unique(points_dict, points_dict2)
data = {
"obj_obs":np.stack([obj_pcloud,obj_pcloud2]),
"env_obs":np.stack([env_pcloud,env_pcloud2]),
"sampled_pts":np.stack([sampled_pts,sampled_pts2]),
"sampled_occ":np.stack([sampled_occ,sampled_occ2]),
"sampled_flow":np.stack([sampled_flow,sampled_flow2]),
"geo_dists":geo_dists.astype(np.float32),
"num_pairs":len(geo_dists),
"idx":idx,
"start_frame":int(points_dict['start_frame']),
"end_frame":int(points_dict['end_frame']),
}
return data
def _get_pts_related_info(self, points_dict):
pts = points_dict['pts'].astype(np.float32)
        occs = np.unpackbits(points_dict['occ'])[:len(pts)]  # unpackbits pads to a multiple of 8
inds = points_dict['ind']
flow = np.zeros((len(pts), 3), dtype=np.float32)
flow[occs != 0] = points_dict['flow'].astype(np.float32) * 10.
return pts, occs, inds, flow
def _prepare_matched_unique(self, points_dict, points_dict2):
pts1,occs1,inds1,flow1 = self._get_pts_related_info(points_dict)
pts2,occs2,inds2,flow2 = self._get_pts_related_info(points_dict2)
cls1, id1 = np.unique(inds1, return_index=True)
cls2, id2 = np.unique(inds2, return_index=True)
int_cls, int_id1, int_id2 = np.intersect1d(cls1, cls2,
assume_unique=True, return_indices=True)
geo_dists = np.zeros_like(int_cls)
unique_pts_1 = pts1[occs1==1][id1[int_id1]]
unique_flow_1 = flow1[occs1==1][id1[int_id1]]
unique_occ_1 = np.ones(geo_dists.shape[0], dtype=occs1.dtype)
sub_inds = common_util.subsample_points(unique_pts_1, resolution=0.03, return_index=True)
unique_pts_1 = unique_pts_1[sub_inds]
unique_flow_1 = unique_flow_1[sub_inds]
unique_occ_1 = unique_occ_1[sub_inds]
sample_others1 = np.random.randint(pts1.shape[0], size=pts1.shape[0] - unique_pts_1.shape[0])
pts_others1 = pts1[sample_others1]
occ_others1 = occs1[sample_others1]
flow_others1 = flow1[sample_others1]
sampled_pts1 = np.concatenate([unique_pts_1, pts_others1])
sampled_occ1 = np.concatenate([unique_occ_1, occ_others1])
sampled_flow1 = np.concatenate([unique_flow_1, flow_others1])
unique_pts_2 = pts2[occs2==1][id2[int_id2]]
unique_flow_2 = flow2[occs2==1][id2[int_id2]]
unique_occ_2 = np.ones(geo_dists.shape[0], dtype=occs2.dtype)
unique_pts_2 = unique_pts_2[sub_inds]
unique_flow_2 = unique_flow_2[sub_inds]
unique_occ_2 = unique_occ_2[sub_inds]
sample_others2 = np.random.randint(pts2.shape[0], size=pts2.shape[0] - unique_pts_2.shape[0])
pts_others2 = pts2[sample_others2]
occ_others2 = occs2[sample_others2]
flow_others2 = flow2[sample_others2]
sampled_pts2 = np.concatenate([unique_pts_2, pts_others2])
sampled_occ2 = np.concatenate([unique_occ_2, occ_others2])
sampled_flow2 = np.concatenate([unique_flow_2, flow_others2])
geo_dists = geo_dists[sub_inds]
return sampled_pts1, sampled_pts2,\
sampled_occ1, sampled_occ2, \
sampled_flow1, sampled_flow2, geo_dists
def _prepare_partial_obs(self, info_dict):
# obj partial observation
obj_pcloud = info_dict['obj_pcloud_obs'].astype(np.float32)
grasp_loc = common_util.transform_points(info_dict['grasp_loc'], scene_range, to_range)
target_loc = common_util.transform_points(info_dict['target_loc'], scene_range, to_range)
tiled_grasp_loc = np.tile(grasp_loc, (len(obj_pcloud), 1)).astype(np.float32)
tiled_target_loc = np.tile(target_loc, (len(obj_pcloud), 1)).astype(np.float32)
        obj_pcloud = np.concatenate([obj_pcloud, tiled_target_loc, obj_pcloud[:,:3] - tiled_grasp_loc], axis=-1)
if 'obj_pcloud' in self.transform:
obj_pcloud = self.transform['obj_pcloud'](obj_pcloud)
# scene partial observation
env_pcloud = info_dict['env_pcloud'].astype(np.float32)
env_pcloud += 1e-4 * np.random.randn(*env_pcloud.shape)
if 'env_pcloud' in self.transform:
env_pcloud = self.transform['env_pcloud'](env_pcloud)
return obj_pcloud, env_pcloud
# chosen is the set of positive points that's preselected
def _prepare_points(self, points_dict, chosen=None):
pts,occs,inds,flow = self._get_pts_related_info(points_dict)
if chosen is None:
if self.num_points == -1:
sampled_pts = pts
sampled_occ = occs
sampled_flow = flow
sampled_inds = np.arange(len(pts))
else:
sampled_inds = np.random.randint(pts.shape[0], size=self.num_points)
sampled_pts = pts[sampled_inds]
sampled_occ = occs[sampled_inds]
sampled_flow = flow[sampled_inds]
else:
            pts_chosen = pts[occs != 0][chosen]
            occ_chosen = np.ones(chosen.shape[0], dtype=occs.dtype)
            flow_chosen = flow[occs != 0][chosen]
if self.num_points == -1:
sample_others = np.random.randint(pts.shape[0], size=pts.shape[0] - chosen.shape[0])
else:
sample_others = np.random.randint(pts.shape[0], size=self.num_points - chosen.shape[0])
pts_others = pts[sample_others]
occ_others = occs[sample_others]
flow_others = flow[sample_others]
sampled_inds = np.concatenate([chosen, sample_others])
sampled_pts = np.concatenate([pts_chosen, pts_others])
sampled_occ = np.concatenate([occ_chosen, occ_others])
            sampled_flow = np.concatenate([flow_chosen, flow_others])
return pts, occs, sampled_pts, sampled_occ.astype(np.float32), sampled_flow, sampled_inds
def _get_pair_id(self, pair_info):
pair_filename = os.path.splitext(str(pair_info["target_file"]))[0]
pair_reset_id, pair_frame_id = (int(f) for f in pair_filename.split('_'))
return pair_reset_id, pair_frame_id
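    # e.g. a target_file of "003_012.npz" maps to (3, 12), i.e.
    # (pair_reset_id, pair_frame_id); the file stem encodes both ids.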
def _prepare_pair_data(self, pair_info, sampled_occs_ids):
# load pair info
dists_sampled = pair_info['dists'][sampled_occs_ids]
tgtid_sampled = pair_info['inds'][sampled_occs_ids]
        # draw samples:
        # the first H // pos_ratio rows draw from their three closest
        # neighbors; the remaining rows draw from the farther candidates
H,W = dists_sampled.shape
draw_pair_ids = np.random.randint(3, size=H)
draw_pair_ids[H // self.pos_ratio:] = np.random.randint(3, high=W, size=H - H // self.pos_ratio)
tgtids = tgtid_sampled[np.arange(H), draw_pair_ids]
geo_dists = dists_sampled[np.arange(H), draw_pair_ids]
# contrastive_mask = geo_dists > self.contrastive_threshold
return geo_dists, tgtids
def get_model_dict(self, idx):
return self.models[idx]
class PlushEnvGeom(data.Dataset):
def __init__(self, geom_root, pair_root, num_points,
split="train", transform={}, pos_ratio=2):
# Attributes
self.geom_root = geom_root
self.num_points = num_points
self.split = split
if split != "train":
self.num_points = -1
self.pair_root = pair_root
self.transform = transform
self.pos_ratio = pos_ratio
if split == 'train':
with open(os.path.join(geom_root, 'train.pkl'), 'rb') as fp:
self.models = pickle.load(fp)
else:
with open(os.path.join(geom_root, 'test.pkl'), 'rb') as fp:
self.models = pickle.load(fp)
def __len__(self):
''' Returns the length of the dataset.
'''
return len(self.models)
def __getitem__(self, idx):
''' Returns an item of the dataset.
Args:
idx (int): ID of data point
'''
data = {}
split_id, model_id, reset_id, frame_id = self.models[idx]
# load frame and get partial observation
points_dict = np.load(
plushsim_util.get_geom_data_file(
self.geom_root,split_id, model_id, reset_id, frame_id))
obj_pcloud, env_pcloud = self._prepare_partial_obs(points_dict)
# load pair frame info
pair_info = np.load(
plushsim_util.get_pair_data_file(
self.pair_root,split_id, model_id, reset_id, frame_id))
pair_reset_id, pair_frame_id = self._get_pair_id(pair_info)
# load pair frame and get partial observation
points_dict2 = np.load(
plushsim_util.get_geom_data_file(
self.geom_root,split_id, model_id, pair_reset_id, pair_frame_id))
obj_pcloud2, env_pcloud2 = self._prepare_partial_obs(points_dict2)
if self.split == 'train':
# if training, load random points
# implicit network sampled points
pts, occs, sampled_pts, sampled_occ, sampled_inds = self._prepare_points(points_dict)
# get which occupied points are sampled (index is in the occupied subset)
occed = occs != 0
num_occed = occed.sum()
total_to_occs = np.zeros(pts.shape[0], dtype=np.uint32)
total_to_occs[occed] = np.arange(num_occed)
sampled_occs_ids = total_to_occs[sampled_inds[sampled_occ == 1.]]
# basically sampled_positive ids is used to index the pairs in pair info npz
            # reorganize so that occupied samples come first
            occ_mask = sampled_occ == 1.
            sampled_pts = np.concatenate([sampled_pts[occ_mask], sampled_pts[~occ_mask]])
            sampled_occ = np.concatenate([sampled_occ[occ_mask], sampled_occ[~occ_mask]])
geo_dists, tgtids = self._prepare_pair_data(pair_info, sampled_occs_ids)
_,_, sampled_pts2, sampled_occ2, _ = self._prepare_points(points_dict2, chosen=tgtids)
else:
# if not training, load matched points
sampled_pts, sampled_pts2, sampled_occ, sampled_occ2, geo_dists = self._prepare_matched_unique(points_dict, points_dict2)
data = {
"obj_obs":np.stack([obj_pcloud,obj_pcloud2]),
"env_obs":np.stack([env_pcloud,env_pcloud2]),
"sampled_pts":np.stack([sampled_pts,sampled_pts2]),
"sampled_occ":np.stack([sampled_occ,sampled_occ2]),
"geo_dists":geo_dists.astype(np.float32),
"num_pairs":len(geo_dists),
"idx":idx,
}
return data
def _prepare_matched_unique(self, points_dict, points_dict2):
        pts1 = points_dict['pts'].astype(np.float32)
        occs1 = np.unpackbits(points_dict['occ'])[:len(pts1)]  # unpackbits pads to a multiple of 8
        inds1 = points_dict['ind']
        pts2 = points_dict2['pts'].astype(np.float32)
        occs2 = np.unpackbits(points_dict2['occ'])[:len(pts2)]
        inds2 = points_dict2['ind']
cls1, id1 = np.unique(inds1, return_index=True)
cls2, id2 = np.unique(inds2, return_index=True)
int_cls, int_id1, int_id2 = np.intersect1d(cls1, cls2, assume_unique=True, return_indices=True)
geo_dists = np.zeros_like(int_cls)
unique_pts_1 = pts1[occs1==1][id1[int_id1]]
unique_pts_2 = pts2[occs2==1][id2[int_id2]]
unique_occ_1 = np.ones(geo_dists.shape[0], dtype=occs1.dtype)
unique_occ_2 = np.ones(geo_dists.shape[0], dtype=occs2.dtype)
sample_others1 = np.random.randint(pts1.shape[0], size=pts1.shape[0] - unique_pts_1.shape[0])
sample_others2 = np.random.randint(pts2.shape[0], size=pts2.shape[0] - unique_pts_2.shape[0])
pts_others1 = pts1[sample_others1]
occ_others1 = occs1[sample_others1]
pts_others2 = pts2[sample_others2]
occ_others2 = occs2[sample_others2]
sampled_pts1 = np.concatenate([unique_pts_1, pts_others1])
sampled_occ1 = np.concatenate([unique_occ_1, occ_others1])
sampled_pts2 = np.concatenate([unique_pts_2, pts_others2])
sampled_occ2 = np.concatenate([unique_occ_2, occ_others2])
return sampled_pts1, sampled_pts2, sampled_occ1, sampled_occ2, geo_dists
def _prepare_partial_obs(self, info_dict):
# obj partial observation
obj_pcloud = info_dict['obj_pcloud'].astype(np.float32)
obj_pcloud += 1e-4 * np.random.randn(*obj_pcloud.shape)
if 'obj_pcloud' in self.transform:
obj_pcloud = self.transform['obj_pcloud'](obj_pcloud)
# scene partial observation
env_pcloud = info_dict['env_pcloud'].astype(np.float32)
env_pcloud += 1e-4 * np.random.randn(*env_pcloud.shape)
if 'env_pcloud' in self.transform:
env_pcloud = self.transform['env_pcloud'](env_pcloud)
return obj_pcloud, env_pcloud
# chosen is the set of positive points that's preselected
def _prepare_points(self, points_dict, chosen=None):
pts = points_dict['pts'].astype(np.float32)
        occs = points_dict['occ']
        occs = np.unpackbits(occs)[:pts.shape[0]]  # unpackbits pads to a multiple of 8
if chosen is None:
if self.num_points == -1:
sampled_pts = pts
sampled_occ = occs
sampled_inds = np.arange(len(pts))
else:
sampled_inds = np.random.randint(pts.shape[0], size=self.num_points)
sampled_pts = pts[sampled_inds]
sampled_occ = occs[sampled_inds]
else:
            pts_chosen = pts[occs != 0][chosen]
occ_chosen = np.ones(chosen.shape[0], dtype=occs.dtype)
if self.num_points == -1:
sample_others = np.random.randint(pts.shape[0], size=pts.shape[0] - chosen.shape[0])
else:
sample_others = np.random.randint(pts.shape[0], size=self.num_points - chosen.shape[0])
pts_others = pts[sample_others]
occ_others = occs[sample_others]
sampled_inds = np.concatenate([chosen, sample_others])
sampled_pts = np.concatenate([pts_chosen, pts_others])
sampled_occ = np.concatenate([occ_chosen, occ_others])
return pts, occs, sampled_pts, sampled_occ.astype(np.float32), sampled_inds
def _get_pair_id(self, pair_info):
pair_filename = os.path.splitext(str(pair_info["target_file"]))[0]
pair_reset_id, pair_frame_id = (int(f) for f in pair_filename.split('_'))
return pair_reset_id, pair_frame_id
def _prepare_pair_data(self, pair_info, sampled_occs_ids):
# load pair info
dists_sampled = pair_info['dists'][sampled_occs_ids]
tgtid_sampled = pair_info['inds'][sampled_occs_ids]
        # draw samples:
        # the first H // pos_ratio rows draw from their three closest
        # neighbors; the remaining rows draw from the farther candidates
H,W = dists_sampled.shape
draw_pair_ids = np.random.randint(3, size=H)
draw_pair_ids[H // self.pos_ratio:] = np.random.randint(3, high=W, size=H - H // self.pos_ratio)
tgtids = tgtid_sampled[np.arange(H), draw_pair_ids]
geo_dists = dists_sampled[np.arange(H), draw_pair_ids]
# contrastive_mask = geo_dists > self.contrastive_threshold
return geo_dists, tgtids
def get_model_dict(self, idx):
return self.models[idx]
def build_transform_geom(cfg):
from . import transforms as tsf
from torchvision import transforms
transform = {}
transform['obj_pcloud'] = transforms.Compose([
tsf.SubsamplePointcloud(cfg['data']['pointcloud_n_obj']),
tsf.PointcloudNoise(cfg['data']['pointcloud_noise'])
])
transform['env_pcloud'] = transforms.Compose([
tsf.SubsamplePointcloud(cfg['data']['pointcloud_n_env']),
tsf.PointcloudNoise(cfg['data']['pointcloud_noise'])
])
return transform
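# A minimal sketch of the cfg fields build_transform_geom reads (values here
# are hypothetical, mirroring configs/plush_dyn_geodesics.yaml):
#   cfg = {'data': {'pointcloud_n_obj': 5000, 'pointcloud_n_env': 1000,
#                   'pointcloud_noise': 0.005}}
#   transform = build_transform_geom(cfg)
#   obj = transform['obj_pcloud'](np.random.rand(20000, 9).astype(np.float32))
#   obj.shape  # (5000, 9): subsampled, then perturbed with Gaussian noise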
def get_geom_dataset(cfg, split='train', transform='build'):
geom_root = cfg['data']['geom_path']
pair_root = cfg['data']['pair_path']
num_points = cfg['data']['points_subsample']
pos_ratio = cfg['data'].get('pos_ratio', 2)
if transform == 'build':
transform = build_transform_geom(cfg)
return PlushEnvGeom(geom_root, pair_root, num_points, split=split, transform=transform, pos_ratio=pos_ratio)
def get_combined_dataset(cfg, split='train', transform='build'):
flow_root = cfg['data']['flow_path']
pair_root = cfg['data']['pair_path']
num_points = cfg['data']['points_subsample']
pos_ratio = cfg['data'].get('pos_ratio', 2)
if transform == 'build':
transform = build_transform_geom(cfg)
return PlushEnvBoth(flow_root, pair_root, num_points, split=split, transform=transform, pos_ratio=pos_ratio)
def get_plush_loader(cfg, mode, split='train', transform='build', test_shuffle=False, num_workers=None):
if mode == 'geom':
dataset = get_geom_dataset(cfg, split, transform)
    elif mode == 'combined':
        dataset = get_combined_dataset(cfg, split, transform)
    else:
        raise ValueError(f"unknown plush loader mode: {mode}")
if split == 'train':
loader = torch.utils.data.DataLoader(
dataset, batch_size=cfg['training']['batch_size'],
num_workers=cfg['training']['n_workers'],
shuffle=True,
collate_fn=collate_pair_fn,
worker_init_fn=worker_init_fn)
else:
loader = torch.utils.data.DataLoader(
dataset, batch_size=1,
num_workers=cfg['training']['n_workers_val'] if num_workers is None else num_workers,
shuffle=test_shuffle,
collate_fn=collate_pair_fn)
return loader
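# A minimal usage sketch (assumes a full config such as
# configs/plush_dyn_geodesics.yaml on disk):
#   import yaml
#   with open('configs/plush_dyn_geodesics.yaml') as f:
#       cfg = yaml.safe_load(f)
#   train_loader = get_plush_loader(cfg, mode='combined', split='train')
#   batch = next(iter(train_loader))
#   batch['sampled_pts'].shape  # (batch_size, 2, points_subsample, 3)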
def get_plan_loader(cfg, transform='build', category="teddy", num_workers=None):
    if transform == 'build':
        transform = build_transform_geom(cfg)
dataset = PlushEnvPlan(cfg['data']['plan_path'], category=category, transform=transform)
loader = torch.utils.data.DataLoader(
dataset, batch_size=1,
num_workers=cfg['training']['n_workers_val'] if num_workers is None else num_workers,
shuffle=False,)
return loader
class PlushEnvPlan(data.Dataset):
    def __init__(self, plan_root, category="teddy", transform={}):
# Attributes
self.plan_root = plan_root
self.transform = transform
self.category = category
import glob
self.scenarios = glob.glob(f'{plan_root}/**/*.npz', recursive=True)
self.scenarios = [x for x in self.scenarios if category in x][:-1]
self.scenarios.sort()
def __len__(self):
''' Returns the length of the dataset.
'''
return len(self.scenarios)
def __getitem__(self, idx):
''' Returns an item of the dataset.
Args:
idx (int): ID of data point
'''
data = {}
# load frame and get partial observation
infos = np.load(self.scenarios[idx])
obj_pcloud_start, env_pcloud_start = self._prepare_partial_obs(infos, "start")
obj_pcloud_end, env_pcloud_end = self._prepare_partial_obs(infos, "end")
action = infos['actions'].astype(np.float32)
pts_start, occ_start, ind_start = self._get_pts_related_info(infos, 'start')
pts_end, occ_end, ind_end = self._get_pts_related_info(infos, 'end')
data = {
"obj_obs_start":obj_pcloud_start,
"env_obs_start":env_pcloud_start,
"obj_obs_end":obj_pcloud_end,
"env_obs_end":env_pcloud_end,
'gt_pts_start': infos['sim_pts_start'].astype(np.float32),
'gt_pts_end': infos['sim_pts_end'].astype(np.float32),
'sampled_pts_start': pts_start,
'sampled_occ_start': occ_start,
'sampled_ind_start': ind_start,
'sampled_pts_end': pts_end,
'sampled_occ_end': occ_end,
'sampled_ind_end': ind_end,
"actions": action,
"sequence_ids":infos['sequence_ids'],
"fname":self.scenarios[idx],
"idx":idx,
}
return data
def _prepare_partial_obs(self, info_dict, key):
# obj partial observation
obj_pcloud = info_dict[f'obj_pcloud_{key}'].astype(np.float32)
if 'obj_pcloud' in self.transform:
obj_pcloud = self.transform['obj_pcloud'](obj_pcloud)
# scene partial observation
env_pcloud = info_dict[f'env_pcloud_{key}'].astype(np.float32)
env_pcloud += 1e-4 * np.random.randn(*env_pcloud.shape)
if 'env_pcloud' in self.transform:
env_pcloud = self.transform['env_pcloud'](env_pcloud)
return obj_pcloud, env_pcloud
def _get_pts_related_info(self, points_dict, key):
        pts = points_dict[f'pts_{key}'].astype(np.float32)
        occs = np.unpackbits(points_dict[f'occ_{key}'])[:len(pts)].astype(np.float32)
        inds = points_dict[f'ind_{key}'].astype(np.int32)
return pts, occs, inds |
NVlabs/ACID/ACID/src/data/transforms.py | import numpy as np
# Transforms
class PointcloudNoise(object):
''' Point cloud noise transformation class.
It adds noise to point cloud data.
Args:
stddev (int): standard deviation
'''
def __init__(self, stddev):
self.stddev = stddev
    def __call__(self, data):
        ''' Calls the transformation.
        Args:
            data (dict): data dictionary; the raw points live under the
                key None (the convention used throughout this module)
        '''
        data_out = data.copy()
        points = data[None]
noise = self.stddev * np.random.randn(*points.shape)
noise = noise.astype(np.float32)
data_out[None] = points + noise
return data_out
class SubsamplePointcloud(object):
''' Point cloud subsampling transformation class.
It subsamples the point cloud data.
Args:
N (int): number of points to be subsampled
'''
def __init__(self, N):
self.N = N
    def __call__(self, data):
        ''' Calls the transformation.
        Args:
            data (ndarray): point cloud array of shape (N, D)
        '''
        indices = np.random.randint(data.shape[0], size=self.N)
        return data[indices]
class SubsamplePoints(object):
''' Points subsampling transformation class.
It subsamples the points data.
Args:
N (int): number of points to be subsampled
'''
def __init__(self, N):
self.N = N
def __call__(self, data):
''' Calls the transformation.
Args:
data (dictionary): data dictionary
'''
points = data[None]
occ = data['occ']
ind = data['ind']
flow1 = data['flow1']
flow2 = data['flow2']
data_out = data.copy()
if isinstance(self.N, int):
idx = np.random.randint(points.shape[0], size=self.N)
data_out.update({
None: points[idx, :],
'occ': occ[idx],
'ind': ind[idx],
'flow1': flow1[idx],
'flow2': flow2[idx],
})
else:
Nt_out, Nt_in = self.N
occ_binary = (occ >= 0.5)
points0 = points[~occ_binary]
points1 = points[occ_binary]
ind0 = ind[~occ_binary]
ind1 = ind[occ_binary]
flow10 = flow1[~occ_binary]
flow11 = flow1[occ_binary]
flow20 = flow2[~occ_binary]
flow21 = flow2[occ_binary]
idx0 = np.random.randint(points0.shape[0], size=Nt_out)
idx1 = np.random.randint(points1.shape[0], size=Nt_in)
points0 = points0[idx0, :]
points1 = points1[idx1, :]
points = np.concatenate([points0, points1], axis=0)
ind0 = ind0[idx0]
ind1 = ind1[idx1]
ind = np.concatenate([ind0, ind1], axis=0)
flow10 = flow10[idx0]
flow11 = flow11[idx1]
flow1 = np.concatenate([flow10, flow11], axis=0)
flow20 = flow20[idx0]
flow21 = flow21[idx1]
flow2 = np.concatenate([flow20, flow21], axis=0)
occ0 = np.zeros(Nt_out, dtype=np.float32)
occ1 = np.ones(Nt_in, dtype=np.float32)
occ = np.concatenate([occ0, occ1], axis=0)
volume = occ_binary.sum() / len(occ_binary)
volume = volume.astype(np.float32)
data_out.update({
None: points,
'occ': occ,
'volume': volume,
'ind': ind,
'flow1': flow1,
'flow2': flow2,
})
return data_out
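# A minimal usage sketch with fabricated data (the key None holds the raw
# points, following the conventions above):
if __name__ == '__main__':
    n = 16
    fake = {
        None: np.random.rand(n, 3).astype(np.float32),
        'occ': (np.arange(n) >= n // 2).astype(np.float32),
        'ind': np.arange(n),
        'flow1': np.zeros((n, 3), dtype=np.float32),
        'flow2': np.zeros((n, 3), dtype=np.float32),
    }
    out = SubsamplePoints((4, 4))(fake)
    print(out[None].shape, out['occ'])  # (8, 3), four zeros then four ones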
|
NVlabs/ACID/ACID/configs/default.yaml | method: conv_onet
data:
train_split: train
val_split: val
test_split: test
dim: 3
act_dim: 6
padding: 0.1
type: geom
model:
decoder: simple
encoder: resnet18
decoder_kwargs: {}
encoder_kwargs: {}
multi_gpu: false
c_dim: 512
training:
out_dir: out/default
batch_size: 64
pos_weight: 5
print_every: 200
visualize_every: 1000
visualize_total: 15
checkpoint_every: 1000
validate_every: 2000
backup_every: 100000
eval_sample: false
model_selection_metric: loss
model_selection_mode: minimize
n_workers: 4
n_workers_val: 4
test:
threshold: 0.5
eval_mesh: true
eval_pointcloud: true
remove_wall: false
model_file: model_best.pt
generation:
batch_size: 100000
refinement_step: 0
vis_n_outputs: 30
generate_mesh: true
generate_pointcloud: true
generation_dir: generation
use_sampling: false
resolution_0: 32
upsampling_steps: 3
simplify_nfaces: null
copy_groundtruth: false
copy_input: true
latent_number: 4
latent_H: 8
latent_W: 8
latent_ny: 2
latent_nx: 2
latent_repeat: true
sliding_window: False # added for crop generation |
NVlabs/ACID/ACID/configs/plush_dyn_geodesics.yaml | method: conv_onet
data:
flow_path: train_data/flow
pair_path: train_data/pair
pointcloud_n_obj: 5000
pointcloud_n_env: 1000
pointcloud_noise: 0.005
points_subsample: 3000
model:
type: combined
obj_encoder_kwargs:
f_dim: 3
hidden_dim: 64
plane_resolution: 128
unet_kwargs:
depth: 4
merge_mode: concat
start_filts: 64
env_encoder_kwargs:
f_dim: 3
hidden_dim: 16
plane_resolution: 64
unet_kwargs:
depth: 2
merge_mode: concat
start_filts: 16
decoder_kwargs:
corr_dim: 32
sample_mode: bilinear # bilinear / nearest
hidden_size: 32
obj_c_dim: 64
env_c_dim: 16
loss:
type: contrastive
contrastive_threshold: 1
use_geodesics: true
scale_with_geodesics: False
training:
out_dir: result/dyn/geodesics
batch_size: 4
model_selection_metric: flow
model_selection_mode: minimize
print_every: 1
visualize_every: 4000
validate_every: 4000
checkpoint_every: 4000
backup_every: 4000
n_workers: 16
n_workers_val: 4
test:
threshold: 0.95
eval_mesh: true
eval_pointcloud: false
model_file: model_best.pt
generation:
refine: false
n_x: 128
n_z: 1
|
NVlabs/ACID/ACID/metadata/camera.json | {"cam0": [[[1.0, 0.0, 0.0, 0.0], [0.0, 0.30901699437494745, 0.9510565162951536, 9.500000000000028], [0.0, -0.9510565162951536, 0.30901699437494745, 3.000000000000007], [0.0, 0.0, 0.0, 1.0]], [[305.4163695204686, 0.0, 180.0], [0.0, 305.4163695204686, 320.0], [0.0, 0.0, 1.0]]], "cam1": [[[6.861555643110583e-17, -0.9510565162951536, 0.30901699437494745, 2.9999999999999964], [2.11176968422134e-16, 0.30901699437494745, 0.9510565162951536, 9.500000000000028], [-1.0, 0.0, 2.220446049250313e-16, -6.861555643110581e-15], [0.0, 0.0, 0.0, 1.0]], [[305.4163695204686, 0.0, 180.0], [0.0, 305.4163695204686, 320.0], [0.0, 0.0, 1.0]]], "cam2": [[[6.861555643110583e-17, 0.9510565162951536, -0.30901699437494745, -3.0], [-2.11176968422134e-16, 0.30901699437494745, 0.9510565162951536, 9.500000000000028], [1.0, 0.0, 2.220446049250313e-16, 6.861555643110581e-15], [0.0, 0.0, 0.0, 1.0]], [[305.4163695204686, 0.0, 180.0], [0.0, 305.4163695204686, 320.0], [0.0, 0.0, 1.0]]], "cam3": [[[-1.0, -9.662289450897499e-17, 1.601616662667754e-16, 1.892183365217075e-15], [1.2246467991473532e-16, 0.30901699437494745, 0.9510565162951536, 9.500000000000028], [-1.413865101699022e-16, 0.9510565162951536, -0.30901699437494745, -3.000000000000007], [0.0, 0.0, 0.0, 1.0]], [[305.4163695204686, 0.0, 180.0], [0.0, 305.4163695204686, 320.0], [0.0, 0.0, 1.0]]]} |
NVlabs/ACID/ACID/preprocess/gen_data_flow_plush.py | import numpy as np
import os
import time, datetime
import sys
import os.path as osp
ACID_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))
sys.path.insert(0,ACID_dir)
import json
from src.utils import plushsim_util
from src.utils import common_util
import glob
import tqdm
from multiprocessing import Pool
import argparse
parser = argparse.ArgumentParser("Training Flow Data Generation")
data_plush_default = osp.join(ACID_dir, "data_plush")
flow_default = osp.join(ACID_dir, "train_data", "flow")
parser.add_argument("--data_root", type=str, default=data_plush_default)
parser.add_argument("--save_root", type=str, default=flow_default)
args = parser.parse_args()
data_root = args.data_root
save_root = args.save_root
scene_range = plushsim_util.SCENE_RANGE.copy()
to_range = np.array([[-1.1,-1.1,-1.1],[1.1,1.1,1.1]]) * 0.5
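# common_util.transform_points presumably rescales coordinates linearly from
# the simulator's scene bounds (scene_range) into the ~[-0.55, 0.55]^3 cube
# defined by to_range above, matching the normalization in src/data/core.py.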
class_to_std = {
'teddy':0.12,
'elephant':0.15,
'octopus':0.12,
'rabbit':0.08,
'dog':0.08,
'snake':0.04,
}
def export_train_data(data_id):
# load action info
split_id, model_category, model_name, reset_id, interaction_id = data_id
grasp_loc, target_loc, f1, _, f2 = plushsim_util.get_action_info(model_category, model_name, split_id, reset_id, interaction_id, data_root)
    # get observations
    obj_pts1, env_pts1 = plushsim_util.get_scene_partial_pointcloud(
        model_category, model_name, split_id, reset_id, f1, data_root)
    obj_pts1 = common_util.subsample_points(
        common_util.transform_points(obj_pts1, scene_range, to_range), resolution=0.005, return_index=False)
    env_pts1 = common_util.subsample_points(
        common_util.transform_points(env_pts1, scene_range, to_range), resolution=0.020, return_index=False)
    # calculate flow
    sim_pts1, _, loc, _, _ = plushsim_util.get_object_full_points(
        model_category, model_name, split_id, reset_id, f1, data_root)
    sim_pts2, _, _, _, _ = plushsim_util.get_object_full_points(
        model_category, model_name, split_id, reset_id, f2, data_root)
    sim_pts1 = common_util.transform_points(sim_pts1, scene_range, to_range)
    sim_pts2 = common_util.transform_points(sim_pts2, scene_range, to_range)
    sim_pts_flow = sim_pts2 - sim_pts1
    # sample occupancy
    center = common_util.transform_points(loc, scene_range, to_range)[0]
    pts, occ, pt_class = plushsim_util.sample_occupancies(sim_pts1, center,
        std=class_to_std[model_category], sample_scheme='object')
# get implicit flows
flow = sim_pts_flow[pt_class]
# save
kwargs = {'sim_pts':sim_pts1.astype(np.float16),
'obj_pcloud_obs':obj_pts1.astype(np.float16),
'env_pcloud':env_pts1.astype(np.float16),
'pts':pts.astype(np.float16),
'occ':np.packbits(occ),
'ind':pt_class.astype(np.uint16),
'flow':flow.astype(np.float16),
'start_frame':f1,
'end_frame':f2,
'grasp_loc':grasp_loc,
'target_loc': target_loc}
model_dir = os.path.join(save_root, f"{split_id}", f"{model_name}")
save_path = os.path.join(model_dir, f"{reset_id:03d}_{interaction_id:03d}.npz")
np.savez_compressed(save_path, **kwargs)
def get_all_data_points_flow(data_root):
good_interactions = glob.glob(f"{data_root}/*/*/*/info/good_interactions.json")
good_ints = []
for g in tqdm.tqdm(good_interactions):
split_id, model_category, model_name = g.split('/')[-5:-2]
model_dir = os.path.join(save_root, f"{split_id}", f"{model_name}")
os.makedirs(model_dir, exist_ok=True)
model_dir = plushsim_util.get_model_dir(data_root, split_id, model_category, model_name)
with open(g, 'r') as fp:
good_ones = json.load(fp)
for k,v in good_ones.items():
reset_id = int(k)
for int_id in v:
good_ints.append((split_id, model_category, model_name, reset_id, int_id))
return good_ints
good_ints = get_all_data_points_flow(data_root)  # append [:100] to test on a subset
start_time = time.time()
with Pool(40) as p:
for _ in tqdm.tqdm(p.imap_unordered(export_train_data, good_ints), total=len(good_ints)):
pass
end_time = time.time()
from datetime import timedelta
time_str = str(timedelta(seconds=end_time - start_time))
print(f'Total processing takes: {time_str}') |
NVlabs/ACID/ACID/preprocess/gen_data_contrastive_pairs_flow.py | import os
import sys
import glob
import tqdm
import random
import argparse
import numpy as np
import os.path as osp
import time
from multiprocessing import Pool
ACID_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))
sys.path.insert(0,ACID_dir)
parser = argparse.ArgumentParser("Training Contrastive Pair Data Generation")
data_plush_default = osp.join(ACID_dir, "data_plush")
meta_default = osp.join(ACID_dir, "data_plush", "metadata")
flow_default = osp.join(ACID_dir, "train_data", "flow")
pair_default = osp.join(ACID_dir, "train_data", "pair")
parser.add_argument("--data_root", type=str, default=data_plush_default)
parser.add_argument("--meta_root", type=str, default=meta_default)
parser.add_argument("--flow_root", type=str, default=flow_default)
parser.add_argument("--save_root", type=str, default=pair_default)
args = parser.parse_args()
data_root = args.data_root
flow_root = args.flow_root
save_root = args.save_root
meta_root = args.meta_root
os.makedirs(save_root, exist_ok=True)
# row-wise unique: each row is first offset by a distinct imaginary weight so
# that equal values in different rows stay distinct; duplicates *within* a
# row are then masked with the sentinel 256, larger than any uint8 distance
def using_complex(a):
weight = 1j*np.linspace(0, a.shape[1], a.shape[0], endpoint=False)
b = a + weight[:, np.newaxis]
u, ind = np.unique(b, return_index=True)
b = np.zeros_like(a) + 256
np.put(b, ind, a.flat[ind])
return b
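# A small illustration (hypothetical helper, never invoked by the pipeline)
# of the row-wise-unique trick above: within each row, repeated values are
# replaced by the sentinel 256, which sorts past every valid uint8 distance.
def _demo_using_complex():
    a = np.array([[3, 1, 3, 2],
                  [5, 5, 0, 1]])
    return using_complex(a)  # -> [[3, 1, 256, 2], [5, 256, 0, 1]]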
def process(pair, num_samples=320, keep=80):
split_id, model_name, f,p = pair
src_file = np.load(f"{flow_root}/{split_id}/{model_name}/{f}")
tgt_file = np.load(f"{flow_root}/{split_id}/{model_name}/{p}")
src_inds = src_file['ind']
tgt_inds = tgt_file['ind']
    src_inds = np.tile(src_inds, (num_samples, 1)).T
    # randint's high bound is exclusive; len(tgt_inds) keeps the last target
    # point reachable
    tgt_samples = np.random.randint(0, high=len(tgt_inds), size=(len(src_inds), num_samples))
tgt_samples_inds = tgt_inds[tgt_samples]
dists = dist_matrix[src_inds.reshape(-1), tgt_samples_inds.reshape(-1)].reshape(*src_inds.shape)
dists_unique = using_complex(dists)
idx = np.argsort(dists_unique, axis=-1)
dists_sorted = np.take_along_axis(dists, idx, axis=-1).astype(np.uint8)[:,:keep]
tgt_samples_sorted = np.take_along_axis(tgt_samples, idx, axis=-1)[:,:keep]
if tgt_samples_sorted.max() <= np.iinfo(np.uint16).max:
tgt_samples_sorted = tgt_samples_sorted.astype(np.uint16)
else:
tgt_samples_sorted = tgt_samples_sorted.astype(np.uint32)
results = {"target_file":p, "dists":dists_sorted, "inds":tgt_samples_sorted}
np.savez_compressed(os.path.join(save_dir, f"pair_{f}"), **results)
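# Shapes inside process() for the defaults num_samples=320, keep=80 (with N
# occupied source points): `dists` is (N, 320) sampled geodesic distances;
# the saved `dists`/`inds` keep only the 80 closest candidates per source
# point, and become the pair_info['dists'] / pair_info['inds'] arrays that
# the dataloaders in src/data/core.py sample from.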
def export_pair_data(data_id):
split_id, model_name = data_id
all_files = all_geoms[data_id]
print(split_id, model_name)
global dist_matrix
dist_matrix = np.load(f'{meta_root}/{split_id}/{model_name}_dist.npz')['arr_0']
global save_dir
save_dir = os.path.join(save_root, split_id, model_name)
os.makedirs(save_dir, exist_ok=True)
pairs = [ (split_id, model_name, f,random.choice(all_files)) for f in all_files ]
start_time = time.time()
with Pool(10) as p:
for _ in tqdm.tqdm(p.imap_unordered(process, pairs), total=len(all_files)):
pass
end_time = time.time()
from datetime import timedelta
time_str = str(timedelta(seconds=end_time - start_time))
print(f'Total processing takes: {time_str}')
if __name__ == '__main__':
from collections import defaultdict
global all_geoms
all_geoms = defaultdict(lambda: [])
for g in glob.glob(f"{flow_root}/*/*/*"):
split_id, model_name, file_name = g.split('/')[-3:]
all_geoms[(split_id, model_name)].append(file_name)
for k in all_geoms.keys():
export_pair_data(k)
|
NVlabs/ACID/ACID/preprocess/gen_data_flow_splits.py | import os
import sys
import os.path as osp
ACID_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))
sys.path.insert(0,ACID_dir)
import glob
import argparse
flow_default = osp.join(ACID_dir, "train_data", "flow")
parser = argparse.ArgumentParser("Making training / testing splits...")
parser.add_argument("--flow_root", type=str, default=flow_default)
parser.add_argument("--no_split", action="store_true", default=False)
args = parser.parse_args()
flow_root = args.flow_root
all_npz = glob.glob(f"{flow_root}/*/*/*.npz")
print(f"In total {len(all_npz)} data points...")
def filename_to_id(fname):
split_id, model_name, f = fname.split("/")[-3:]
reset_id, frame_id = (int(x) for x in os.path.splitext(f)[0].split('_'))
return split_id, model_name, reset_id, frame_id
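# e.g. (hypothetical path) ".../flow/train/teddy/003_012.npz"
#   -> ("train", "teddy", 3, 12); only the last three path components matter.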
from collections import defaultdict
total_files = defaultdict(lambda : defaultdict(lambda : []))
for fname in all_npz:
split_id, model_name, reset_id, frame_id = filename_to_id(fname)
total_files[(split_id, model_name)][reset_id].append(frame_id)
total_files = dict(total_files)
for k,v in total_files.items():
total_files[k] = dict(v)
import pickle
if args.no_split:
train = total_files
test = total_files
else:
train = {}
test = {}
    for k, v in total_files.items():
        split_id, model_name = k
        # Hold out all "teddy" models for testing; train on everything else
        if "teddy" in model_name:
            test[k] = v
        else:
            train[k] = v
train_total = []
for k,v in train.items():
for x, u in v.items():
for y in u:
train_total.append((*k, x, y))
print(f"training data points: {len(train_total)}")
test_total = []
for k,v in test.items():
for x, u in v.items():
for y in u:
test_total.append((*k, x, y))
print(f"testing data points: {len(test_total)}")
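# Each pickled entry is a (split_id, model_name, reset_id, frame_id) tuple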
with open(f"{flow_root}/train.pkl", "wb") as fp:
pickle.dump(train_total, fp)
with open(f"{flow_root}/test.pkl", "wb") as fp:
pickle.dump(test_total, fp) |
erasromani/isaac-sim-python/simulate_grasp.py | import os
import argparse
from grasp.grasp_sim import GraspSimulator
from omni.isaac.motion_planning import _motion_planning
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.synthetic_utils import OmniKitHelper
def main(args):
kit = OmniKitHelper(
{"renderer": "RayTracedLighting", "experience": f"{os.environ['EXP_PATH']}/isaac-sim-python.json", "width": args.width, "height": args.height}
)
_mp = _motion_planning.acquire_motion_planning_interface()
_dc = _dynamic_control.acquire_dynamic_control_interface()
    record = args.video is not None
sim = GraspSimulator(kit, _dc, _mp, record=record)
# add object path
    from_server = args.location != 'local'
for path in args.path:
sim.add_object_path(path, from_server=from_server)
# start simulation
sim.play()
for _ in range(args.num):
sim.add_object(position=(40, 0, 10))
sim.wait_for_drop()
sim.wait_for_loading()
evaluation = sim.execute_grasp(args.position, args.angle)
output_string = f"Grasp evaluation: {evaluation}"
        banner = '#' * len(output_string)
        print('\n' + banner)
        print(output_string)
        print(banner + '\n')
# Stop physics simulation
sim.stop()
if record: sim.save_video(args.video)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Simulate Panda arm planar grasp execution in NVIDIA Omniverse Isaac Sim')
required = parser.add_argument_group('required arguments')
required.add_argument('-P', '--path', type=str, nargs='+', metavar='', required=True, help='path to usd file or content folder')
required.add_argument('-p', '--position', type=float, nargs=3, metavar='', required=True, help='grasp position, X Y Z')
required.add_argument('-a', '--angle', type=float, metavar='', required=True, help='grasp angle in degrees')
parser.add_argument('-l', '--location', type=str, metavar='', required=False, help='location of usd path, choices={local, nucleus_server}', choices=['local', 'nucleus_server'], default='local')
parser.add_argument('-n', '--num', type=int, metavar='', required=False, help='number of objects to spawn in the scene', default=1)
parser.add_argument('-v', '--video', type=str, metavar='', required=False, help='output filename of grasp simulation video')
parser.add_argument('-W', '--width', type=int, metavar='', required=False, help='width of the viewport and generated images', default=1024)
parser.add_argument('-H', '--height', type=int, metavar='', required=False, help='height of the viewport and generated images', default=800)
args = parser.parse_args()
print(args.path)
main(args) |
erasromani/isaac-sim-python/README.md | # isaac-sim-python: Python wrapper for NVIDIA Omniverse Isaac-Sim
## Overview
This repository contains a collection of Python wrappers for NVIDIA Omniverse Isaac-Sim simulations. The `grasp` package simulates planar grasp execution of a Panda arm in a scene with various rigid objects placed in a bin.
## Installation
This repository requires installation of NVIDIA Omniverse Isaac-Sim. A comprehensive setup tutorial is provided in the official [NVIDIA Omniverse Isaac-Sim](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/setup.html) documentation. Following installation of Isaac-Sim, a conda environment must also be created that contains all the required packages for the python wrappers. Another comprehensive conda environment setup tutorial is provided in this [link](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/python_samples.html).
`ffmpeg-python` must be installed within the `isaac-sim` conda environment and can be acquired via a typical pip install:
```
conda activate isaac-sim
pip install ffmpeg-python
```
Lastly, clone the repository into the `python-samples` sub-directory within the `isaac-sim` directory.
```
git clone https://github.com/erasromani/isaac-sim-python.git
```
## Quickstart
Navigate to the `python-samples` sub-directory within the `isaac-sim` directory, source environment variables, activate conda environment, and run `simulate_grasp.py`.
```
source setenv.sh
conda activate isaac-sim
cd isaac-sim-python
python simulate_grasp.py -P Isaac/Props/Flip_Stack/large_corner_bracket_physics.usd Isaac/Props/Flip_Stack/screw_95_physics.usd Isaac/Props/Flip_Stack/t_connector_physics.usd -l nucleus_server -p 40 0 5 -a 45 -n 5 -v sim.mp4
```
The command above will simulate grasp execution of a Panda arm in a scene with a bin and 5 objects randomly selected from the given collection of USD files. The specified grasp pose is a planar grasp with grasp position `(40, 0, 5)` and angle `45` degrees. A video of the simulation will be generated and saved as `sim.mp4`.
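For reference, the same scenario can also be driven programmatically. The sketch below mirrors what `simulate_grasp.py` does internally and assumes the same sourced `setenv.sh` and `isaac-sim` conda environment as the quickstart above.

```
import os
from grasp.grasp_sim import GraspSimulator
from omni.isaac.motion_planning import _motion_planning
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.synthetic_utils import OmniKitHelper

# Launch OmniKit (EXP_PATH is set when setenv.sh is sourced)
kit = OmniKitHelper({"renderer": "RayTracedLighting",
                     "experience": f"{os.environ['EXP_PATH']}/isaac-sim-python.json",
                     "width": 1024, "height": 800})
mp = _motion_planning.acquire_motion_planning_interface()
dc = _dynamic_control.acquire_dynamic_control_interface()

sim = GraspSimulator(kit, dc, mp, record=False)
sim.add_object_path("Isaac/Props/Flip_Stack/screw_95_physics.usd", from_server=True)
sim.play()
sim.add_object(position=(40, 0, 10))
sim.wait_for_drop()
sim.wait_for_loading()
print(sim.execute_grasp((40, 0, 5), 45))  # prints GRASP_eval.SUCCESS or GRASP_eval.FAILURE
sim.stop()
```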
## Additional Resources
- https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html
- https://docs.omniverse.nvidia.com/py/isaacsim/index.html
|
erasromani/isaac-sim-python/grasp/__init__.py | |
erasromani/isaac-sim-python/grasp/grasp_sim.py | import os
import numpy as np
import tempfile
import omni.kit
from omni.isaac.synthetic_utils import SyntheticDataHelper
from grasp.utils.isaac_utils import RigidBody
from grasp.grasping_scenarios.grasp_object import GraspObject
from grasp.utils.visualize import screenshot, img2vid
default_camera_pose = {
'position': (142, -127, 56), # position given by (x, y, z)
'target': (-180, 234, -27) # target given by (x, y , z)
}
class GraspSimulator(GraspObject):
""" Defines a grasping simulation scenario
    Scenarios define planar grasp execution in a scene with a Panda arm and various rigid objects
"""
def __init__(self, kit, dc, mp, dt=1/60.0, record=False, record_interval=10):
"""
Initializes grasp simulator
Args:
kit (omni.isaac.synthetic_utils.scripts.omnikit.OmniKitHelper): helper class for launching OmniKit from a python environment
            dc (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
            mp (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
dt (float): simulation time step in seconds
record (bool): flag for capturing screenshots throughout simulation for video recording
record_interval (int): frame intervals for capturing screenshots
"""
super().__init__(kit, dc, mp)
self.frame = 0
self.dt = dt
self.record = record
self.record_interval = record_interval
self.tmp_dir = tempfile.mkdtemp()
self.sd_helper = SyntheticDataHelper()
# create initial scene
self.create_franka()
# set camera pose
self.set_camera_pose(default_camera_pose['position'], default_camera_pose['target'])
def execute_grasp(self, position, angle):
"""
Executes a planar grasp with a panda arm.
Args:
            position (list or numpy.ndarray): grasp position array of length 3 given by [x, y, z]
            angle (float): grasp angle in degrees
        Returns:
            evaluation (enum.EnumMeta): GRASP_eval class containing two states {GRASP_eval.FAILURE, GRASP_eval.SUCCESS}
"""
self.set_target_angle(angle)
self.set_target_position(position)
self.perform_tasks()
# start simulation
        previously_playing = self._kit.editor.is_playing()
if self.pick_and_place is not None:
while True:
self.step(0)
self.update()
if self.pick_and_place.evaluation is not None:
break
evaluation = self.pick_and_place.evaluation
self.stop_tasks()
self.step(0)
self.update()
# Stop physics simulation
if not previously_playing: self.stop()
return evaluation
def wait_for_drop(self, max_steps=2000):
"""
Waits for all objects to drop.
Args:
max_steps (int): maximum number of timesteps before aborting wait
"""
# start simulation
        previously_playing = self._kit.editor.is_playing()
        if not previously_playing: self.play()
        step = 0
        while step < max_steps or self._kit.is_loading():
            self.step(step)
            self.update()
            objects_speed = np.array([o.get_speed() for o in self.objects])
            if np.all(objects_speed == 0): break
            step += 1
# Stop physics simulation
if not previously_playing: self.stop()
def wait_for_loading(self):
"""
Waits for all scene visuals to load.
"""
while self.is_loading():
self.update()
def play(self):
"""
Starts simulation.
"""
self._kit.play()
        if not all(hasattr(self, a) for a in ('world', 'franka_solid', 'bin_solid', 'pick_and_place')):
self.register_scene()
def stop(self):
"""
Stops simulation.
"""
self._kit.stop()
def update(self):
"""
Simulate one time step.
"""
if self.record and self.sd_helper is not None and self.frame % self.record_interval == 0:
screenshot(self.sd_helper, suffix=self.frame, directory=self.tmp_dir)
self._kit.update(self.dt)
self.frame += 1
def is_loading(self):
"""
Determine if all scene visuals are loaded.
Returns:
(bool): flag for whether or not all scene visuals are loaded
"""
return self._kit.is_loading()
def set_camera_pose(self, position, target):
"""
Set camera pose.
Args:
            position (list or numpy.ndarray): camera position array of length 3 given by [x, y, z]
            target (list or numpy.ndarray): target position array of length 3 given by [x, y, z]
"""
self._editor.set_camera_position("/OmniverseKit_Persp", *position, True)
self._editor.set_camera_target("/OmniverseKit_Persp", *target, True)
def save_video(self, path):
"""
Save video recording of screenshots taken throughout the simulation.
Args:
path (str): output video filename
"""
framerate = int(round(1.0 / (self.record_interval * self.dt)))
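        # e.g. with the defaults dt=1/60 and record_interval=10: round(1.0 / (10 / 60)) = 6 fps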
img2vid(os.path.join(self.tmp_dir, '*.png'), path, framerate=framerate)
|
erasromani/isaac-sim-python/grasp/grasping_scenarios/scenario.py | # Credits: The majority of this code is taken from build code associated with nvidia/isaac-sim:2020.2.2_ea with minor modifications.
import gc
import carb
import omni.usd
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
from grasp.utils.isaac_utils import set_up_z_axis
class Scenario:
"""
Defines a block stacking scenario.
Scenarios define the life cycle within kit and handle init, startup, shutdown etc.
"""
def __init__(self, editor, dc, mp):
"""
Initialize scenario.
Args:
editor (omni.kit.editor._editor.IEditor): editor object from isaac-sim simulation
            dc (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
            mp (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
"""
self._editor = editor # Reference to the Kit editor
self._stage = omni.usd.get_context().get_stage() # Reference to the current USD stage
self._dc = dc # Reference to the dynamic control plugin
self._mp = mp # Reference to the motion planning plugin
self._domains = [] # Contains instances of environment
        self._obstacles = []  # Contains references to any obstacles in the scenario
self._executor = None # Contains the thread pool used to run tasks
self._created = False # Is the robot created or not
self._running = False # Is the task running or not
def __del__(self):
"""
Cleanup scenario objects when deleted, force garbage collection.
"""
        self._created = False
self._domains = []
self._obstacles = []
self._executor = None
gc.collect()
def reset_blocks(self, *args):
"""
        Function called when block poses are reset.
"""
pass
def stop_tasks(self, *args):
"""
Stop tasks in the scenario if any.
"""
        self._running = False
def step(self, step):
"""
Step the scenario, can be used to update things in the scenario per frame.
"""
pass
def create_franka(self, *args):
"""
Create franka USD objects.
"""
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return
self.asset_path = nucleus_server + "/Isaac"
# USD paths loaded by scenarios
self.franka_table_usd = self.asset_path + "/Samples/Leonardo/Stage/franka_block_stacking.usd"
self.franka_ghost_usd = self.asset_path + "/Samples/Leonardo/Robots/franka_ghost.usd"
self.background_usd = self.asset_path + "/Environments/Grid/gridroom_curved.usd"
self.rubiks_cube_usd = self.asset_path + "/Props/Rubiks_Cube/rubiks_cube.usd"
self.red_cube_usd = self.asset_path + "/Props/Blocks/red_block.usd"
self.yellow_cube_usd = self.asset_path + "/Props/Blocks/yellow_block.usd"
self.green_cube_usd = self.asset_path + "/Props/Blocks/green_block.usd"
self.blue_cube_usd = self.asset_path + "/Props/Blocks/blue_block.usd"
self._created = True
self._stage = omni.usd.get_context().get_stage()
set_up_z_axis(self._stage)
        self.stop_tasks()
def register_assets(self, *args):
"""
Connect franka controller to usd assets
"""
pass
def task(self, domain):
"""
Task to be performed for a given robot.
"""
pass
def perform_tasks(self, *args):
"""
Perform all tasks in scenario if multiple robots are present.
"""
        self._running = True
def is_created(self):
"""
Return if the franka was already created.
"""
return self._created
|
erasromani/isaac-sim-python/grasp/grasping_scenarios/__init__.py | |
erasromani/isaac-sim-python/grasp/grasping_scenarios/grasp_object.py | # Credits: Starter code taken from build code associated with nvidia/isaac-sim:2020.2.2_ea.
import os
import random
import numpy as np
import glob
import omni
import carb
from enum import Enum
from collections import deque
from pxr import Gf, UsdGeom
from copy import copy
from omni.physx.scripts.physicsUtils import add_ground_plane
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.utils._isaac_utils import math as math_utils
from omni.isaac.samples.scripts.utils.world import World
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
from omni.physx import _physx
from grasp.utils.isaac_utils import create_prim_from_usd, RigidBody, set_translate, set_rotate, setup_physics
from grasp.grasping_scenarios.franka import Franka, default_config
from grasp.grasping_scenarios.scenario import Scenario
statedic = {0: "orig", 1: "axis_x", 2: "axis_y", 3: "axis_z"}
class SM_events(Enum):
"""
State machine events.
"""
START = 0
WAYPOINT_REACHED = 1
GOAL_REACHED = 2
ATTACHED = 3
DETACHED = 4
TIMEOUT = 5
STOP = 6
    NONE = 7  # no event occurred, just clock ticks
class SM_states(Enum):
"""
State machine states.
"""
STANDBY = 0 # Default state, does nothing unless enters with event START
PICKING = 1
ATTACH = 2
HOLDING = 3
GRASPING = 4
LIFTING = 5
class GRASP_eval(Enum):
"""
Grasp execution evaluation.
"""
FAILURE = 0
SUCCESS = 1
class PickAndPlaceStateMachine(object):
"""
Self-contained state machine class for Robot Behavior. Each machine state may react to different events,
and the handlers are defined as in-class functions.
"""
def __init__(self, stage, robot, ee_prim, default_position):
"""
Initialize state machine.
Args:
stage (pxr.Usd.Stage): usd stage
robot (grasp.grasping_scenarios.franka.Franka): robot controller object
ee_prim (pxr.Usd.Prim): Panda arm end effector prim
default_position (omni.isaac.dynamic_control._dynamic_control.Transform): default position of Panda arm
"""
self.robot = robot
self.dc = robot.dc
self.end_effector = ee_prim
self.end_effector_handle = None
self._stage = stage
self.start_time = 0.0
self.start = False
self._time = 0.0
self.default_timeout = 10
self.default_position = copy(default_position)
self.target_position = default_position
self.target_point = default_position.p
self.target_angle = 0 # grasp angle in degrees
self.reset = False
self.evaluation = None
self.waypoints = deque()
self.thresh = {}
# Threshold to clear waypoints/goal
# (any waypoint that is not final will be cleared with the least precision)
self.precision_thresh = [
[0.0005, 0.0025, 0.0025, 0.0025],
[0.0005, 0.005, 0.005, 0.005],
[0.05, 0.2, 0.2, 0.2],
[0.08, 0.4, 0.4, 0.4],
[0.18, 0.6, 0.6, 0.6],
]
self.add_object = None
# Event management variables
        # Used to verify whether the goal was reached because the robot moved, or because it never left the previous target
self._is_moving = False
self._attached = False # Used to flag the Attached/Detached events on a change of state from the end effector
self._detached = False
self.is_closed = False
self.pick_count = 0
# Define the state machine handling functions
self.sm = {}
# Make empty state machine for all events and states
for s in SM_states:
self.sm[s] = {}
for e in SM_events:
self.sm[s][e] = self._empty
self.thresh[s] = 0
# Fill in the functions to handle each event for each status
self.sm[SM_states.STANDBY][SM_events.START] = self._standby_start
self.sm[SM_states.STANDBY][SM_events.GOAL_REACHED] = self._standby_goal_reached
self.thresh[SM_states.STANDBY] = 3
self.sm[SM_states.PICKING][SM_events.GOAL_REACHED] = self._picking_goal_reached
self.thresh[SM_states.PICKING] = 1
self.sm[SM_states.GRASPING][SM_events.ATTACHED] = self._grasping_attached
self.sm[SM_states.LIFTING][SM_events.GOAL_REACHED] = self._lifting_goal_reached
for s in SM_states:
self.sm[s][SM_events.DETACHED] = self._all_detached
self.sm[s][SM_events.TIMEOUT] = self._all_timeout
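        # Runtime dispatch is then a single table lookup, e.g. in step():
        #   self.sm[self.current_state][SM_events.GOAL_REACHED]()
        # (state, event) pairs without a registered handler fall through to self._empty.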
self.current_state = SM_states.STANDBY
self.previous_state = -1
self._physxIFace = _physx.acquire_physx_interface()
# Auxiliary functions
def _empty(self, *args):
"""
Empty function to use on states that do not react to some specific event.
"""
pass
def change_state(self, new_state, print_state=True):
"""
Function called every time a event handling changes current state.
"""
self.current_state = new_state
self.start_time = self._time
if print_state: carb.log_warn(str(new_state))
def goalReached(self):
"""
Checks if the robot has reached a certain waypoint in the trajectory.
"""
if self._is_moving:
state = self.robot.end_effector.status.current_frame
target = self.robot.end_effector.status.current_target
error = 0
            for i in [0, 2, 3]:  # check position ("orig") and two axes; axis_x is implied by axis_y/axis_z
k = statedic[i]
state_v = state[k]
target_v = target[k]
error = np.linalg.norm(state_v - target_v)
# General Threshold is the least strict
thresh = self.precision_thresh[-1][i]
if len(self.waypoints) == 0:
thresh = self.precision_thresh[self.thresh[self.current_state]][i]
if error > thresh:
return False
self._is_moving = False
return True
return False
def get_current_state_tr(self):
"""
Gets current End Effector Transform, converted from Motion position and Rotation matrix.
"""
# Gets end effector frame
state = self.robot.end_effector.status.current_frame
orig = state["orig"] * 100.0
mat = Gf.Matrix3f(
*state["axis_x"].astype(float), *state["axis_y"].astype(float), *state["axis_z"].astype(float)
)
q = mat.ExtractRotation().GetQuaternion()
(q_x, q_y, q_z) = q.GetImaginary()
q = [q_x, q_y, q_z, q.GetReal()]
tr = _dynamic_control.Transform()
tr.p = list(orig)
tr.r = q
return tr
def lerp_to_pose(self, pose, n_waypoints=1):
"""
        Adds spherically interpolated (slerp) waypoints from the last pose in the waypoint list to the provided pose;
        if the waypoint list is empty, the current pose is used.
"""
if len(self.waypoints) == 0:
start = self.get_current_state_tr()
start.p = math_utils.mul(start.p, 0.01)
else:
start = self.waypoints[-1]
if n_waypoints > 1:
for i in range(n_waypoints):
self.waypoints.append(math_utils.slerp(start, pose, (i + 1.0) / n_waypoints))
else:
self.waypoints.append(pose)
def move_to_zero(self):
self._is_moving = False
self.robot.end_effector.go_local(
orig=[], axis_x=[], axis_y=[], axis_z=[], use_default_config=True, wait_for_target=False, wait_time=5.0
)
def move_to_target(self):
"""
Move arm towards target with RMP controller.
"""
xform_attr = self.target_position
self._is_moving = True
orig = np.array([xform_attr.p.x, xform_attr.p.y, xform_attr.p.z])
axis_y = np.array(math_utils.get_basis_vector_y(xform_attr.r))
axis_z = np.array(math_utils.get_basis_vector_z(xform_attr.r))
self.robot.end_effector.go_local(
orig=orig,
axis_x=[],
axis_y=axis_y,
axis_z=axis_z,
use_default_config=True,
wait_for_target=False,
wait_time=5.0,
)
def get_target_orientation(self):
"""
        Gets target gripper orientation given target angle and a planar grasp.
"""
angle = self.target_angle * np.pi / 180
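        # Builds a rotation by `angle` about the world z-axis combined with a flip that
        # points the gripper z-axis downward (a top-down planar grasp).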
mat = Gf.Matrix3f(
-np.cos(angle), -np.sin(angle), 0, -np.sin(angle), np.cos(angle), 0, 0, 0, -1
)
q = mat.ExtractRotation().GetQuaternion()
(q_x, q_y, q_z) = q.GetImaginary()
q = [q_x, q_y, q_z, q.GetReal()]
return q
def get_target_to_point(self, offset_position=[]):
"""
Get target Panda arm pose from target position and angle.
"""
offset = _dynamic_control.Transform()
if offset_position:
offset.p.x = offset_position[0]
offset.p.y = offset_position[1]
offset.p.z = offset_position[2]
target_pose = _dynamic_control.Transform()
target_pose.p = self.target_point
target_pose.r = self.get_target_orientation()
target_pose = math_utils.mul(target_pose, offset)
target_pose.p = math_utils.mul(target_pose.p, 0.01)
return target_pose
def set_target_to_point(self, offset_position=[], n_waypoints=1, clear_waypoints=True):
"""
        Clears the waypoint list and sets a new waypoint list towards a given point in space.
"""
target_position = self.get_target_to_point(offset_position=offset_position)
# linear interpolate to target pose
if clear_waypoints:
self.waypoints.clear()
self.lerp_to_pose(target_position, n_waypoints=n_waypoints)
# Get first waypoint target
self.target_position = self.waypoints.popleft()
def step(self, timestamp, start=False, reset=False):
"""
Steps the State machine, handling which event to call.
"""
if self.current_state != self.previous_state:
self.previous_state = self.current_state
if not self.start:
self.start = start
if self.current_state in [SM_states.GRASPING, SM_states.LIFTING]:
# object grasped
if not self.robot.end_effector.gripper.is_closed(1e-1) and not self.robot.end_effector.gripper.is_moving(1e-2):
self._attached = True
# self.is_closed = False
# object not grasped
elif self.robot.end_effector.gripper.is_closed(1e-1):
self._detached = True
self.is_closed = True
# Process events
if reset:
# reset to default pose, clear waypoints, and re-initialize event handlers
self.current_state = SM_states.STANDBY
self.previous_state = -1
self.robot.end_effector.gripper.open()
self.evaluation = None
self.start = False
self._time = 0
self.start_time = self._time
self.pick_count = 0
self.waypoints.clear()
self._detached = False
self._attached = False
self.target_position = self.default_position
self.move_to_target()
elif self._detached:
self._detached = False
self.sm[self.current_state][SM_events.DETACHED]()
elif self.goalReached():
if len(self.waypoints) == 0:
self.sm[self.current_state][SM_events.GOAL_REACHED]()
else:
self.target_position = self.waypoints.popleft()
self.move_to_target()
# self.start_time = self._time
elif self.current_state == SM_states.STANDBY and self.start:
self.sm[self.current_state][SM_events.START]()
elif self._attached:
self._attached = False
self.sm[self.current_state][SM_events.ATTACHED]()
elif self._time - self.start_time > self.default_timeout:
self.sm[self.current_state][SM_events.TIMEOUT]()
else:
self.sm[self.current_state][SM_events.NONE]()
self._time += 1.0 / 60.0
# Event handling functions. Each state has its own event handler function depending on which event happened
def _standby_start(self, *args):
"""
Handles the start event when in standby mode.
Proceeds to move towards target grasp pose.
"""
# Tell motion planner controller to ignore current object as an obstacle
self.pick_count = 0
self.evaluation = None
self.lerp_to_pose(self.default_position, 1)
self.lerp_to_pose(self.default_position, 60)
self.robot.end_effector.gripper.open()
# set target above the current bin with offset of 10 cm
self.set_target_to_point(offset_position=[0.0, 0.0, -10.0], n_waypoints=90, clear_waypoints=False)
# pause before lowering to target object
self.lerp_to_pose(self.waypoints[-1], 180)
self.set_target_to_point(n_waypoints=90, clear_waypoints=False)
# start arm movement
self.move_to_target()
# Move to next state
self.change_state(SM_states.PICKING)
# NOTE: As is, this method is never executed
def _standby_goal_reached(self, *args):
"""
Reset grasp execution.
"""
self.move_to_zero()
self.start = True
def _picking_goal_reached(self, *args):
"""
        Grasp pose reached, close gripper.
"""
self.robot.end_effector.gripper.close()
self.is_closed = True
# Move to next state
self.move_to_target()
self.robot.end_effector.gripper.width_history.clear()
self.change_state(SM_states.GRASPING)
def _grasping_attached(self, *args):
"""
Object grasped, lift arm.
"""
self.waypoints.clear()
offset = _dynamic_control.Transform()
offset.p.z = -10
target_pose = math_utils.mul(self.get_current_state_tr(), offset)
target_pose.p = math_utils.mul(target_pose.p, 0.01)
self.lerp_to_pose(target_pose, n_waypoints=60)
self.lerp_to_pose(target_pose, n_waypoints=120)
# Move to next state
self.move_to_target()
self.robot.end_effector.gripper.width_history.clear()
self.change_state(SM_states.LIFTING)
def _lifting_goal_reached(self, *args):
"""
Finished executing grasp successfully, resets for next grasp execution.
"""
self.is_closed = False
self.robot.end_effector.gripper.open()
self._all_detached()
self.pick_count += 1
self.evaluation = GRASP_eval.SUCCESS
carb.log_warn(str(GRASP_eval.SUCCESS))
def _all_timeout(self, *args):
"""
Timeout reached and reset.
"""
self.change_state(SM_states.STANDBY, print_state=False)
self.robot.end_effector.gripper.open()
self.start = False
self.waypoints.clear()
self.target_position = self.default_position
self.lerp_to_pose(self.default_position, 1)
self.lerp_to_pose(self.default_position, 10)
self.lerp_to_pose(self.default_position, 60)
self.move_to_target()
self.evaluation = GRASP_eval.FAILURE
carb.log_warn(str(GRASP_eval.FAILURE))
def _all_detached(self, *args):
"""
Object detached and reset.
"""
self.change_state(SM_states.STANDBY, print_state=False)
self.start = False
self.waypoints.clear()
self.lerp_to_pose(self.target_position, 60)
self.lerp_to_pose(self.default_position, 10)
self.lerp_to_pose(self.default_position, 60)
self.move_to_target()
self.evaluation = GRASP_eval.FAILURE
carb.log_warn(str(GRASP_eval.FAILURE))
class GraspObject(Scenario):
    """ Defines a grasping scenario
Scenarios define the life cycle within kit and handle init, startup, shutdown etc.
"""
def __init__(self, kit, dc, mp):
"""
Initialize scenario.
Args:
kit (omni.isaac.synthetic_utils.scripts.omnikit.OmniKitHelper): helper class for launching OmniKit from a python environment
            dc (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
            mp (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
"""
super().__init__(kit.editor, dc, mp)
self._kit = kit
self._paused = True
self._start = False
self._reset = False
self._time = 0
self._start_time = 0
self.current_state = SM_states.STANDBY
self.timeout_max = 8.0
self.pick_and_place = None
self._pending_stop = False
self._gripper_open = False
self.current_obj = 0
self.max_objs = 100
self.num_objs = 3
self.add_objects_timeout = -1
self.franka_solid = None
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
else:
self.nucleus_server = nucleus_server
def __del__(self):
"""
Cleanup scenario objects when deleted, force garbage collection.
"""
if self.franka_solid:
self.franka_solid.end_effector.gripper = None
super().__del__()
def add_object_path(self, object_path, from_server=False):
"""
Add object usd path.
"""
if from_server and hasattr(self, 'nucleus_server'):
object_path = os.path.join(self.nucleus_server, object_path)
        # A local directory path is expanded to all usd files it contains
        if not from_server and os.path.isdir(object_path): object_usd = glob.glob(os.path.join(object_path, '**/*.usd'), recursive=True)
        else: object_usd = [object_path]
if hasattr(self, 'objects_usd'):
self.objects_usd.extend(object_usd)
else:
self.objects_usd = object_usd
def create_franka(self, *args):
"""
Create franka USD objects and bin USD objects.
"""
super().create_franka()
if self.asset_path is None:
return
# Load robot environment and set its transform
self.env_path = "/scene"
robot_usd = self.asset_path + "/Robots/Franka/franka.usd"
robot_path = "/scene/robot"
create_prim_from_usd(self._stage, robot_path, robot_usd, Gf.Vec3d(0, 0, 0))
bin_usd = self.asset_path + "/Props/KLT_Bin/large_KLT.usd"
bin_path = "/scene/bin"
create_prim_from_usd(self._stage, bin_path, bin_usd, Gf.Vec3d(40, 0, 4))
# Set robot end effector Target
target_path = "/scene/target"
if self._stage.GetPrimAtPath(target_path):
return
GoalPrim = self._stage.DefinePrim(target_path, "Xform")
self.default_position = _dynamic_control.Transform()
self.default_position.p = [0.4, 0.0, 0.3]
self.default_position.r = [0.0, 1.0, 0.0, 0.0] #TODO: Check values for stability
p = self.default_position.p
r = self.default_position.r
set_translate(GoalPrim, Gf.Vec3d(p.x * 100, p.y * 100, p.z * 100))
set_rotate(GoalPrim, Gf.Matrix3d(Gf.Quatd(r.w, r.x, r.y, r.z)))
# Setup physics simulation
add_ground_plane(self._stage, "/groundPlane", "Z", 1000.0, Gf.Vec3f(0.0), Gf.Vec3f(1.0))
setup_physics(self._stage)
def rand_position(self, bound, margin=0, z_range=None):
"""
Obtain random position contained within a specified bound.
"""
x_range = (bound[0][0] * (1 - margin), bound[1][0] * (1 - margin))
y_range = (bound[0][1] * (1 - margin), bound[1][1] * (1 - margin))
if z_range is None:
z_range = (bound[0][2] * (1 - margin), bound[1][2] * (1 - margin))
x = np.random.uniform(*x_range)
y = np.random.uniform(*y_range)
z = np.random.uniform(*z_range)
return Gf.Vec3d(x, y, z)
# combine add_object and add_and_register_object
def add_object(self, *args, register=True, position=None):
"""
Add object to scene.
"""
prim = self.create_new_objects(position=position)
if not register:
return prim
self._kit.update()
if not hasattr(self, 'objects'):
self.objects = []
self.objects.append(RigidBody(prim, self._dc))
def create_new_objects(self, *args, position=None):
"""
Randomly select and create prim of object in scene.
"""
if not hasattr(self, 'objects_usd'):
return
prim_usd_path = self.objects_usd[random.randint(0, len(self.objects_usd) - 1)]
prim_env_path = "/scene/objects/object_{}".format(self.current_obj)
if position is None:
position = self.rand_position(self.bin_solid.get_bound(), margin=0.2, z_range=(10, 10))
prim = create_prim_from_usd(self._stage, prim_env_path, prim_usd_path, position)
if hasattr(self, 'current_obj'): self.current_obj += 1
else: self.current_obj = 0
return prim
def register_objects(self, *args):
"""
Register all objects.
"""
self.objects = []
objects_path = '/scene/objects'
objects_prim = self._stage.GetPrimAtPath(objects_path)
if objects_prim.IsValid():
for object_prim in objects_prim.GetChildren():
self.objects.append(RigidBody(object_prim, self._dc))
# TODO: Delete method
def add_and_register_object(self, *args):
prim = self.create_new_objects()
self._kit.update()
if not hasattr(self, 'objects'):
self.objects = []
self.objects.append(RigidBody(prim, self._dc))
def register_scene(self, *args):
"""
Register world, panda arm, bin, and objects.
"""
self.world = World(self._dc, self._mp)
self.register_assets(args)
self.register_objects(args)
def register_assets(self, *args):
"""
Connect franka controller to usd assets.
"""
# register robot with RMP
robot_path = "/scene/robot"
self.franka_solid = Franka(
self._stage, self._stage.GetPrimAtPath(robot_path), self._dc, self._mp, self.world, default_config
)
# register bin
bin_path = "/scene/bin"
bin_prim = self._stage.GetPrimAtPath(bin_path)
self.bin_solid = RigidBody(bin_prim, self._dc)
# register stage machine
self.pick_and_place = PickAndPlaceStateMachine(
self._stage,
self.franka_solid,
self._stage.GetPrimAtPath("/scene/robot/panda_hand"),
self.default_position,
)
def perform_tasks(self, *args):
"""
Perform all tasks in scenario if multiple robots are present.
"""
self._start = True
self._paused = False
return False
def step(self, step):
"""
Step the scenario, can be used to update things in the scenario per frame.
"""
if self._editor.is_playing():
if self._pending_stop:
self.stop_tasks()
return
# Updates current references and locations for the robot.
self.world.update()
self.franka_solid.update()
target = self._stage.GetPrimAtPath("/scene/target")
xform_attr = target.GetAttribute("xformOp:transform")
if self._reset:
self._paused = False
if not self._paused:
self._time += 1.0 / 60.0
self.pick_and_place.step(self._time, self._start, self._reset)
if self._reset:
self._paused = True
self._time = 0
self._start_time = 0
p = self.default_position.p
r = self.default_position.r
set_translate(target, Gf.Vec3d(p.x * 100, p.y * 100, p.z * 100))
set_rotate(target, Gf.Matrix3d(Gf.Quatd(r.w, r.x, r.y, r.z)))
else:
state = self.franka_solid.end_effector.status.current_target
state_1 = self.pick_and_place.target_position
tr = state["orig"] * 100.0
set_translate(target, Gf.Vec3d(tr[0], tr[1], tr[2]))
set_rotate(target, Gf.Matrix3d(Gf.Quatd(state_1.r.w, state_1.r.x, state_1.r.y, state_1.r.z)))
self._start = False
self._reset = False
if self.add_objects_timeout > 0:
self.add_objects_timeout -= 1
if self.add_objects_timeout == 0:
self.create_new_objects()
else:
translate_attr = xform_attr.Get().GetRow3(3)
rotate_x = xform_attr.Get().GetRow3(0)
rotate_y = xform_attr.Get().GetRow3(1)
rotate_z = xform_attr.Get().GetRow3(2)
orig = np.array(translate_attr) / 100.0
axis_x = np.array(rotate_x)
axis_y = np.array(rotate_y)
axis_z = np.array(rotate_z)
self.franka_solid.end_effector.go_local(
orig=orig,
axis_x=axis_x, # TODO: consider setting this to [] for stability reasons
axis_y=axis_y,
axis_z=axis_z,
use_default_config=True,
wait_for_target=False,
wait_time=5.0,
)
def stop_tasks(self, *args):
"""
Stop tasks in the scenario if any.
"""
if self.pick_and_place is not None:
if self._editor.is_playing():
self._reset = True
self._pending_stop = False
else:
self._pending_stop = True
def pause_tasks(self, *args):
"""
Pause tasks in the scenario.
"""
self._paused = not self._paused
return self._paused
# TODO: use gripper.width == 0 as a proxy for _gripper_open == False
def actuate_gripper(self):
"""
Actuate Panda gripper.
"""
if self._gripper_open:
self.franka_solid.end_effector.gripper.close()
self._gripper_open = False
else:
self.franka_solid.end_effector.gripper.open()
self._gripper_open = True
def set_target_angle(self, angle):
"""
Set grasp angle in degrees.
"""
if self.pick_and_place is not None:
self.pick_and_place.target_angle = angle
def set_target_position(self, position):
"""
Set grasp position.
"""
if self.pick_and_place is not None:
self.pick_and_place.target_point = position
|