{"rowIdx":9413,"cells":{"file_path":{"kind":"string","value":"boredengineering/Robots_for_Omniverse/URDF_descriptions/MIT_mini-cheetah/README.md"},"content":{"kind":"string","value":"# MIT Mini Cheetah\nA URDF description file of a quadruped robot modeled on the MIT Mini Cheetah.\n\n>Source: \n>- YOBOTICS, INC. \n>- [MIT Mini Cheetah - Original](https://github.com/HitSZwang/mini-cheetah-gazebo-urdf) \n>- [MIT Mini Cheetah - Modified for CHAMP](https://github.com/chvmp/mini-cheetah-gazebo-urdf) \n"},"size":{"kind":"number","value":321,"string":"321"},"lang":{"kind":"string","value":"Markdown"},"avg_line_length":{"kind":"number","value":39.24999509375062,"string":"39.249995"},"max_line_length":{"kind":"number","value":98,"string":"98"},"alphanum_fraction":{"kind":"number","value":0.7258566955580789,"string":"0.725857"}}},{"rowIdx":9414,"cells":{"file_path":{"kind":"string","value":"boredengineering/Robots_for_Omniverse/URDF_descriptions/BostonDynamics/README.md"},"content":{"kind":"string","value":"# Boston Dynamics Robots\nhttps://www.bostondynamics.com/\n\n## Little Dog\n\n> Source: \n> - [Boston Dynamics' Little Dog - by RobotLocomotion](https://github.com/RobotLocomotion/LittleDog)\n> - [Boston Dynamics' Little Dog - Modified for CHAMP](https://github.com/chvmp/littledog_description)\n\n## Spot\n\n> Source: \n> - [Boston Dynamics' Spot - by heuristicus](https://github.com/heuristicus/spot_ros)\n> - [Boston Dynamics' Spot - Modified for CHAMP](https://github.com/chvmp/spot_ros)"},"size":{"kind":"number","value":486,"string":"486"},"lang":{"kind":"string","value":"Markdown"},"avg_line_length":{"kind":"number","value":33.785711872449156,"string":"33.785712"},"max_line_length":{"kind":"number","value":102,"string":"102"},"alphanum_fraction":{"kind":"number","value":0.7263374470651492,"string":"0.726337"}}},{"rowIdx":9415,"cells":{"file_path":{"kind":"string","value":"XiaomingY/omni-ies-viewer/README.md"},"content":{"kind":"string","value":"# IES Viewer Omniverse Extension\n\n![IES profile](exts/IESViewer/data/preview.png)\n\nThis extension displays the IES profile web for selected light objects. It is particularly useful for visualizing architectural lighting designs. The orientation of measured light distribution profiles can be quickly tested with visual feedback. IES files are resampled to be lightweight and consistent to render. [A video demo](https://drive.google.com/file/d/1DxvjVGT6ZlfukfuTvyBu3iXaHz8qvY5Q/view?usp=sharing)\n\nThis extension is developed based on the [omni.example.ui_scene.object_info](https://github.com/NVIDIA-Omniverse/kit-extension-sample-ui-scene/tree/main/exts/omni.example.ui_scene.object_info)\n\nSupported light types: sphere light, rectangular light, disk light and cylinder light.\n\nOnly Type C IES files are supported currently, which is also the most commonly used type for architectural lighting.\n\n## Adding This Extension\n\nTo add this extension to your Omniverse app:\n1. Go to Extension Manager and turn on the Viewport Utility extension\n2. Add `git://github.com/XiaomingY/omni-ies-viewer.git?branch=main&dir=exts` to the extension search path\n3. 
Turn on IES Viewer Extension\n"},"size":{"kind":"number","value":1150,"string":"1,150"},"lang":{"kind":"string","value":"Markdown"},"avg_line_length":{"kind":"number","value":59.57894423268714,"string":"59.578944"},"max_line_length":{"kind":"number","value":407,"string":"407"},"alphanum_fraction":{"kind":"number","value":0.8060869558207939,"string":"0.806087"}}},{"rowIdx":9416,"cells":{"file_path":{"kind":"string","value":"XiaomingY/omni-ies-viewer/tools/scripts/link_app.py"},"content":{"kind":"string","value":"import os\nimport argparse\nimport sys\nimport json\nimport packmanapi\nimport urllib3\n\n\ndef find_omniverse_apps():\n http = urllib3.PoolManager()\n try:\n r = http.request(\"GET\", \"http://127.0.0.1:33480/components\")\n except Exception as e:\n print(f\"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\\nError: {e}\")\n sys.exit(1)\n\n apps = {}\n for x in json.loads(r.data.decode(\"utf-8\")):\n latest = x.get(\"installedVersions\", {}).get(\"latest\", \"\")\n if latest:\n for s in x.get(\"settings\", []):\n if s.get(\"version\", \"\") == latest:\n root = s.get(\"launch\", {}).get(\"root\", \"\")\n apps[x[\"slug\"]] = (x[\"name\"], root)\n break\n return apps\n\n\ndef create_link(src, dst):\n print(f\"Creating a link '{src}' -> '{dst}'\")\n packmanapi.link(src, dst)\n\n\nAPP_PRIORITIES = [\"code\", \"create\", \"view\"]\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"Create folder link to Kit App installed from Omniverse Launcher\")\n parser.add_argument(\n \"--path\",\n help=\"Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'\",\n required=False,\n )\n parser.add_argument(\n \"--app\", help=\"Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'\", required=False\n )\n args = parser.parse_args()\n\n path = args.path\n if not path:\n print(\"Path is not specified, looking for Omniverse Apps...\")\n apps = find_omniverse_apps()\n if len(apps) == 0:\n print(\n \"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers.\"\n )\n sys.exit(0)\n\n print(\"\\nFound following Omniverse Apps:\")\n for i, slug in enumerate(apps):\n name, root = apps[slug]\n print(f\"{i}: {name} ({slug}) at: '{root}'\")\n\n if args.app:\n selected_app = args.app.lower()\n if selected_app not in apps:\n choices = \", \".join(apps.keys())\n print(f\"Passed app: '{selected_app}' is not found. 
Specify one of the following found Apps: {choices}\")\n sys.exit(0)\n else:\n selected_app = next((x for x in APP_PRIORITIES if x in apps), None)\n if not selected_app:\n selected_app = next(iter(apps))\n\n print(f\"\\nSelected app: {selected_app}\")\n _, path = apps[selected_app]\n\n if not os.path.exists(path):\n print(f\"Provided path doesn't exist: {path}\")\n else:\n SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))\n create_link(f\"{SCRIPT_ROOT}/../../app\", path)\n print(\"Success!\")\n"},"size":{"kind":"number","value":2813,"string":"2,813"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":32.49999961309524,"string":"32.5"},"max_line_length":{"kind":"number","value":133,"string":"133"},"alphanum_fraction":{"kind":"number","value":0.5623889084385394,"string":"0.562389"}}},{"rowIdx":9417,"cells":{"file_path":{"kind":"string","value":"XiaomingY/omni-ies-viewer/tools/packman/config.packman.xml"},"content":{"kind":"string","value":"\n \n \n \n\n"},"size":{"kind":"number","value":211,"string":"211"},"lang":{"kind":"string","value":"XML"},"avg_line_length":{"kind":"number","value":34.33332761111206,"string":"34.333328"},"max_line_length":{"kind":"number","value":123,"string":"123"},"alphanum_fraction":{"kind":"number","value":0.691943124682734,"string":"0.691943"}}},{"rowIdx":9418,"cells":{"file_path":{"kind":"string","value":"XiaomingY/omni-ies-viewer/tools/packman/bootstrap/install_package.py"},"content":{"kind":"string","value":"# Copyright 2019 NVIDIA CORPORATION\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport zipfile\nimport tempfile\nimport sys\nimport shutil\n\n__author__ = \"hfannar\"\nlogging.basicConfig(level=logging.WARNING, format=\"%(message)s\")\nlogger = logging.getLogger(\"install_package\")\n\n\nclass TemporaryDirectory:\n def __init__(self):\n self.path = None\n\n def __enter__(self):\n self.path = tempfile.mkdtemp()\n return self.path\n\n def __exit__(self, type, value, traceback):\n # Remove temporary data created\n shutil.rmtree(self.path)\n\n\ndef install_package(package_src_path, package_dst_path):\n with zipfile.ZipFile(\n package_src_path, allowZip64=True\n ) as zip_file, TemporaryDirectory() as temp_dir:\n zip_file.extractall(temp_dir)\n # Recursively copy (temp_dir will be automatically cleaned up on exit)\n try:\n # Recursive copy is needed because both package name and version folder could be missing in\n # target directory:\n shutil.copytree(temp_dir, package_dst_path)\n except OSError as exc:\n logger.warning(\n \"Directory %s already present, packaged installation aborted\" % package_dst_path\n )\n else:\n logger.info(\"Package successfully installed to %s\" % package_dst_path)\n\n\ninstall_package(sys.argv[1], 
sys.argv[2])\n"},"size":{"kind":"number","value":1888,"string":"1,888"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":31.56896497294888,"string":"31.568965"},"max_line_length":{"kind":"number","value":103,"string":"103"},"alphanum_fraction":{"kind":"number","value":0.6869703386191894,"string":"0.68697"}}},{"rowIdx":9419,"cells":{"file_path":{"kind":"string","value":"XiaomingY/omni-ies-viewer/exts/IESViewer/IESViewer/extension.py"},"content":{"kind":"string","value":"import omni.ext\nimport omni.ui as ui\nfrom omni.kit.viewport.utility import get_active_viewport_window\nfrom .viewport_scene import ViewportSceneInfo\n\n\n# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be\n# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled\n# on_shutdown() is called.\nclass AimingToolExtension(omni.ext.IExt):\n # ext_id is current extension id. It can be used with extension manager to query additional information, like where\n # this extension is located on filesystem.\n def __init__(self) -> None:\n super().__init__()\n self.viewport_scene = None\n\n def on_startup(self, ext_id):\n viewport_window = get_active_viewport_window()\n\n self.viewport_scene = ViewportSceneInfo(viewport_window, ext_id)\n\n def on_shutdown(self):\n if self.viewport_scene:\n self.viewport_scene.destroy()\n self.viewport_scene = None\n"},"size":{"kind":"number","value":1023,"string":"1,023"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":38.38461390828408,"string":"38.384614"},"max_line_length":{"kind":"number","value":119,"string":"119"},"alphanum_fraction":{"kind":"number","value":0.7086999015555231,"string":"0.7087"}}},{"rowIdx":9420,"cells":{"file_path":{"kind":"string","value":"XiaomingY/omni-ies-viewer/exts/IESViewer/IESViewer/viewport_scene.py"},"content":{"kind":"string","value":"from omni.ui import scene as sc\nimport omni.ui as ui\n\nfrom .object_info_manipulator import ObjInfoManipulator\nfrom .object_info_model import ObjInfoModel\n\nclass ViewportSceneInfo():\n \"\"\"The Object Info Manipulator, placed into a Viewport\"\"\"\n def __init__(self, viewport_window, ext_id) -> None:\n self.scene_view = None\n self.viewport_window = viewport_window\n\n # NEW: Create a unique frame for our SceneView\n with self.viewport_window.get_frame(ext_id):\n # Create a default SceneView (it has a default camera-model)\n self.scene_view = sc.SceneView()\n # Add the manipulator into the SceneView's scene\n with self.scene_view.scene:\n ObjInfoManipulator(model=ObjInfoModel())\n # Register the SceneView with the Viewport to get projection and view updates\n self.viewport_window.viewport_api.add_scene_view(self.scene_view)\n\n def __del__(self):\n self.destroy()\n\n def destroy(self):\n if self.scene_view:\n # Empty the SceneView of any elements it may have\n self.scene_view.scene.clear()\n # un-register the SceneView from Viewport updates\n if self.viewport_window:\n self.viewport_window.viewport_api.remove_scene_view(self.scene_view)\n # Remove our references to these objects\n self.viewport_window = None\n self.scene_view = 
None"},"size":{"kind":"number","value":1422,"string":"1,422"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":39.65714172408167,"string":"39.657142"},"max_line_length":{"kind":"number","value":89,"string":"89"},"alphanum_fraction":{"kind":"number","value":0.6561181429985103,"string":"0.656118"}}},{"rowIdx":9421,"cells":{"file_path":{"kind":"string","value":"XiaomingY/omni-ies-viewer/exts/IESViewer/IESViewer/__init__.py"},"content":{"kind":"string","value":"from .extension import *\n"},"size":{"kind":"number","value":25,"string":"25"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":11.999994000003,"string":"11.999994"},"max_line_length":{"kind":"number","value":24,"string":"24"},"alphanum_fraction":{"kind":"number","value":0.7599999696000012,"string":"0.76"}}},{"rowIdx":9422,"cells":{"file_path":{"kind":"string","value":"XiaomingY/omni-ies-viewer/exts/IESViewer/IESViewer/object_info_model.py"},"content":{"kind":"string","value":"from pxr import Tf\nfrom pxr import Gf\nfrom pxr import Usd\nfrom pxr import UsdGeom\nfrom pxr import UsdShade\nfrom pxr import UsdLux\nfrom .IESReader import IESLight\n\nimport os.path\nimport numpy as np\n\nfrom omni.ui import scene as sc\nimport omni.usd\n\n\ndef _flatten_matrix(matrix: Gf.Matrix4d):\n m0, m1, m2, m3 = matrix[0], matrix[1], matrix[2], matrix[3]\n return [\n m0[0],\n m0[1],\n m0[2],\n m0[3],\n m1[0],\n m1[1],\n m1[2],\n m1[3],\n m2[0],\n m2[1],\n m2[2],\n m2[3],\n m3[0],\n m3[1],\n m3[2],\n m3[3],\n ]\n\nclass ObjInfoModel(sc.AbstractManipulatorModel):\n \"\"\"\n The model tracks the position and info of the selected object.\n \"\"\"\n class MatrixItem(sc.AbstractManipulatorItem):\n \"\"\"\n The Model Item represents the tranformation. It doesn't contain anything\n because we take the tranformation directly from USD when requesting.\n \"\"\"\n\n identity = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]\n\n def __init__(self):\n super().__init__()\n self.value = self.identity.copy()\n\n class PositionItem(sc.AbstractManipulatorItem):\n \"\"\"\n The Model Item represents the position. It doesn't contain anything\n because we take the position directly from USD when requesting.\n \"\"\"\n def __init__(self) -> None:\n super().__init__()\n self.value = [0, 0, 0]\n\n class PositionList(sc.AbstractManipulatorItem):\n \"\"\"\n The Model Item represents the position. 
It doesn't contain anything\n because we take the position directly from USD when requesting.\n \"\"\"\n def __init__(self) -> None:\n super().__init__()\n self.value = [[0,0,0]]\n\n def __init__(self) -> None:\n super().__init__()\n\n # Current selected prim list\n self.prim = []\n self.current_path = []\n self.material_name = []\n\n self.stage_listener = None\n self.horizontal_step = 15\n self.vertical_step = 15\n\n self.IESPoints = [ObjInfoModel.PositionList()]\n self.transformation = [ObjInfoModel.MatrixItem()]\n\n # Save the UsdContext name (we currently only work with a single Context)\n self.usd_context = self._get_context()\n\n # Track selection changes\n self.events = self.usd_context.get_stage_event_stream()\n self.stage_event_delegate = self.events.create_subscription_to_pop(\n self.on_stage_event, name=\"Object Info Selection Update\"\n )\n\n @property\n def _time(self):\n return Usd.TimeCode.Default()\n\n def _get_context(self) -> Usd.Stage:\n # Get the UsdContext we are attached to\n return omni.usd.get_context()\n \n #Update when light are transformed or modified\n def notice_changed(self, notice: Usd.Notice, stage: Usd.Stage) -> None:\n \"\"\"Called by Tf.Notice. Used when the current selected object changes in some way.\"\"\"\n\n light_path = self.current_path\n if not light_path:\n return\n\n for p in notice.GetChangedInfoOnlyPaths():\n \n prim_path = p.GetPrimPath().pathString\n\n #check if prim_path not in selected list but parent of prim_path is in selected list\n if prim_path not in light_path:\n if (True in (light_path_item.startswith(prim_path) for light_path_item in light_path)):\n if UsdGeom.Xformable.IsTransformationAffectedByAttrNamed(p.name):\n self._item_changed(self.transformation[0])\n continue\n if UsdGeom.Xformable.IsTransformationAffectedByAttrNamed(p.name):\n self._item_changed(self.transformation[0])\n #if light property changed such as ies file changed, update profile\n self._item_changed(self.transformation[0])\n \n def _get_transform(self, time: Usd.TimeCode):\n \"\"\"Returns world transform of currently selected object\"\"\"\n if not self.prim:\n return [ObjInfoModel.MatrixItem.identity.copy()]\n\n # Compute matrix from world-transform in USD\n #get transform matrix for each selected light\n world_xform_list = [UsdGeom.BasisCurves(prim).ComputeLocalToWorldTransform(time) for prim in self.prim]\n\n # Flatten Gf.Matrix4d to list\n return [_flatten_matrix(world_xform) for world_xform in world_xform_list]\n\n def get_item(self, identifier):\n if identifier == \"IESPoints\":\n return self.IESPoints\n if identifier == \"transformation\":\n return self.transformation\n\n def get_as_floats(self, item):\n if item == self.transformation:\n return self._get_transform(self._time)\n if item == self.IESPoints:\n return self.get_points(self._time)\n return []\n \n #get ies points for each selected light\n def get_points(self, time: Usd.TimeCode):\n if not self.prim:\n return [[0,0,0]]\n allIESPoint = []\n for prim in self.prim:\n iesFile = prim.GetAttribute('shaping:ies:file').Get()\n allIESPoint.append(IESLight(str(iesFile).replace('@', '')).points)\n return allIESPoint\n\n def on_stage_event(self, event):\n \"\"\"Called by stage_event_stream. 
We only care about selection changes.\"\"\"\n if event.type == int(omni.usd.StageEventType.SELECTION_CHANGED):\n self.current_path = []\n self.prim = []\n primList = []\n primPathList = []\n usd_context = self._get_context()\n stage = usd_context.get_stage()\n if not stage:\n return\n\n prim_paths = usd_context.get_selection().get_selected_prim_paths()\n\n if not prim_paths:\n # This turns off the manipulator when everything is deselected\n self._item_changed(self.transformation[0])\n return\n #select light with ies file applied.\n lightCount = 0\n for i in prim_paths:\n prim = stage.GetPrimAtPath(i)\n if(UsdLux.Light(prim) and prim.GetAttribute('shaping:ies:file').Get() and not (prim.IsA(UsdLux.DistantLight))):\n primList.append(prim)\n primPathList.append(i)\n lightCount = lightCount +1\n if(lightCount==0):\n if self.stage_listener:\n self.stage_listener.Revoke()\n self.stage_listener = None\n self._item_changed(self.transformation[0])\n return\n\n if not self.stage_listener:\n # This handles camera movement\n self.stage_listener = Tf.Notice.Register(Usd.Notice.ObjectsChanged, self.notice_changed, stage)\n\n self.prim = primList\n self.current_path = primPathList\n # Position is changed because new selected object has a different position\n self._item_changed(self.transformation[0])\n\n def destroy(self):\n self.events = None\n self.stage_event_delegate.unsubscribe()"},"size":{"kind":"number","value":7171,"string":"7,171"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":33.98536568787627,"string":"33.985366"},"max_line_length":{"kind":"number","value":127,"string":"127"},"alphanum_fraction":{"kind":"number","value":0.5855529214076763,"string":"0.585553"}}},{"rowIdx":9423,"cells":{"file_path":{"kind":"string","value":"XiaomingY/omni-ies-viewer/exts/IESViewer/IESViewer/object_info_manipulator.py"},"content":{"kind":"string","value":"from __future__ import division\nfrom omni.ui import scene as sc\nfrom omni.ui import color as cl\nimport omni.ui as ui\nimport numpy as np\n\nclass ObjInfoManipulator(sc.Manipulator):\n \"\"\"Manipulator that displays the object path and material assignment\n with a leader line to the top of the object's bounding box.\n \"\"\"\n def on_build(self):\n \"\"\"Called when the model is changed and rebuilds the whole manipulator\"\"\"\n \n if not self.model:\n return\n\n IESPoints = self.model.get_as_floats(self.model.IESPoints)\n\n numHorizontal = int((360/self.model.horizontal_step)+1)\n primCount = 0\n\n for transformation in self.model.get_as_floats(self.model.transformation):\n self.__root_xf = sc.Transform(transformation)\n\n with self.__root_xf:\n self._x_xform = sc.Transform()\n with self._x_xform:\n self._shape_xform = sc.Transform()\n \n IESPoint = IESPoints[primCount]\n numVertical = int(len(IESPoint)/numHorizontal)\n for index in range(0,numHorizontal):\n points = IESPoint[index*numVertical:(index+1)*numVertical]\n if(len(points)>0):\n sc.Curve(points.tolist(), thicknesses=[1.0], colors=[cl.yellow],tessellation=9)\n primCount = primCount+1\n \n def on_model_updated(self, item):\n # Regenerate the manipulator\n 
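# invalidate() asks omni.ui.scene to call on_build() again, so the IES web curves are rebuilt from the updated model data.\n        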
self.invalidate()"},"size":{"kind":"number","value":1526,"string":"1,526"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":37.17499907062503,"string":"37.174999"},"max_line_length":{"kind":"number","value":107,"string":"107"},"alphanum_fraction":{"kind":"number","value":0.5891218869009686,"string":"0.589122"}}},{"rowIdx":9424,"cells":{"file_path":{"kind":"string","value":"XiaomingY/omni-ies-viewer/exts/IESViewer/IESViewer/IESReader.py"},"content":{"kind":"string","value":"import numpy as np\nimport re\nimport math\n#import matplotlib.pyplot as plt\nfrom scipy import interpolate\nimport os.path\n#from mpl_toolkits.mplot3d.axes3d import Axes3D\nimport omni.ext\nimport omni.ui as ui\nomni.kit.pipapi.install(\"astropy\")\nfrom astropy.coordinates import spherical_to_cartesian\n\nDEFAULT_HORIZONTAL_STEP = 15\nDEFAULT_VERTICAL_STEP = 15\nIES_MaxLength = 80\n\nclass IESLight():\n def __init__(self,iesFile):\n\n # Current selected prim\n if iesFile and os.path.exists(iesFile):\n self.file = iesFile\n else:\n return\n self.width = 0\n self.length = 0\n self.radius = 0\n all_values = self.readIESfile(self.file)\n verticalAngles,horizontalAngles,intensities,self.width,self.length,self.radius = self.getIESproperties(all_values)\n horizontalAnglesMirrored, intensityMirrored = self.mirrorAngles(horizontalAngles,intensities)\n horizontalResampled = np.arange(0, 361, DEFAULT_HORIZONTAL_STEP)\n verticalResampled = np.arange(0, verticalAngles[-1]+1, DEFAULT_VERTICAL_STEP)\n resampledIntensity = self.interpolateIESValues(np.array(horizontalAnglesMirrored),np.array(verticalAngles),horizontalResampled,verticalResampled,intensityMirrored)\n self.points = self.IESCoord2XYZ(horizontalResampled,verticalResampled,resampledIntensity,IES_MaxLength)\n #read ies files and return vertical angles, horizontal angles, intensities, width, length, radius.\n #based on the symmetry, horizontal angles and resampled\n def readIESfile(self, fileName):\n f=open(fileName, encoding = \"ISO-8859-1\")#need rb to read \\r\\n correctly. Otherwise universial newline function ignores carriage return. 
\n startReading = 0\n line = f.readline()\n allValues = \"\"\n while line: \n if( not(line.strip())):\n break\n else:\n #after this line, there are actual useful values\n if(\"TILT=NONE\" in line.strip()):\n line = f.readline()\n startReading = 1\n #read all number to one string\n if(startReading):\n allValues = allValues+line\n \n line = f.readline()\n\n f.close()\n #one array with all values\n dimentions = re.split('\\s+',allValues.strip())\n return dimentions\n \n def getIESproperties(self, allValues):\n #return \n FEET2METER = 0.3048\n verticalAngles = []\n horizontalAngles = []\n width = 0\n length = 0\n radius = 0\n intensityMultiplier = 1\n numberVerticalAngle = 0\n numberHorizontalAngle = 0\n unit = 1 #1 for feet, 2 for meter\n \n #number of vertical angles and horizontal angles measured\n numberVerticalAngle = int(allValues[3])\n numberHorizontalAngle = int(allValues[4])\n \n #check if shape is rectangle or disk\n if(float(allValues[7])<0):\n radius = allValues[7]*-1\n else:\n width = allValues[7]\n length = allValues[8]\n #convert dimentions to meter if measured in feet\n if(float(allValues[6])==1):\n radius = radius*FEET2METER\n width = width *FEET2METER\n length = length * FEET2METER\n \n #the actual vertical angles and horizontal angles in list\n verticalAngles = list(map(float, allValues[13:13+numberVerticalAngle]))\n horizontalAngles = list(map(float,allValues[13+numberVerticalAngle:13+numberVerticalAngle+numberHorizontalAngle]))\n \n #read intensities and convert it to 2d array\n intensities = np.array(allValues[13+numberVerticalAngle+numberHorizontalAngle:len(allValues)])\n intensities = intensities.reshape(numberHorizontalAngle,numberVerticalAngle).astype(np.float16)\n \n return verticalAngles,horizontalAngles,intensities,width,length,radius\n\n #ies could have several symmetry:\n #(1)only measured in one horizontal angle (0) which need to be repeated to all horizontal angle from 0 to 360\n #(2)only measured in horizontal angles (0~90) which need to be mirrored twice to horizontal angle from 0 to 360\n #(3)only measured in horizontal angles (0~180) which need to be mirrored to horizontal angle from 0 to 360\n #(4)only measured in horizontal angles (0~360) which could be used directly\n def mirrorAngles(self, horizontalAngles,intensities):\n #make use of symmetry in the file and produce horizontal angles from 0~360\n if(horizontalAngles[-1]==0):\n horizontalAnglesMirrored = list(np.arange(0,361,DEFAULT_HORIZONTAL_STEP))\n else:\n horizontalAnglesMirrored = list(np.arange(0,361,horizontalAngles[-1]/(len(horizontalAngles)-1)))\n \n #make use of symmetry in the file and copy intensitys for horizontal angles from 0~360\n if(horizontalAngles[-1]==90):\n #mirror results [90:180]\n a = np.concatenate((intensities, np.flip(intensities, 0)[1:]), axis=0)\n intensityMirrored = np.concatenate((a, np.flip(a, 0)[1:]), axis=0)\n elif(horizontalAngles[-1]==180):\n intensityMirrored = np.concatenate((intensities, np.flip(intensities, 0)[1:]), axis=0)\n elif(horizontalAngles[-1]==0):\n intensityMirrored = np.array(([intensities[0],]*len(np.arange(0,361,DEFAULT_HORIZONTAL_STEP))))\n else:\n #print(\"Symmetry 360\")\n intensityMirrored = intensities\n\n return horizontalAnglesMirrored, intensityMirrored\n \n def IESCoord2XYZ(self, horizontalAngles,verticalAngles,intensity,maxLength):\n maxValue = np.amax(intensity)\n if(maxValue>maxLength):\n intensity = intensity*(maxLength/maxValue)\n for index, horizontalAngle in enumerate(horizontalAngles):\n if(index ==0):\n #Omniverse and 3ds Max makes 
the light upside down, horizontal angle rotation direction need to be flipped.\n points = np.array(spherical_to_cartesian(intensity[index].tolist(), [math.radians(90-x) for x in verticalAngles], [math.radians(-1*horizontalAngle)]*len(verticalAngles))).transpose()\n else:\n newPoints = np.array(spherical_to_cartesian(intensity[index], [math.radians(90-x) for x in verticalAngles], [math.radians(-1*horizontalAngle)]*len(verticalAngles))).transpose()\n points = np.concatenate((points, newPoints), axis=0)\n #Omniverse and 3ds Max makes the light upside down, so flip z.\n points[:,2] *= -1\n return points\n \n def interpolateIESValues(self, originalHorizontalAngles, originalVerticalAngles, newHorizontalAngles,newVerticalAngles, intensity):\n fun = interpolate.interp2d(originalVerticalAngles, originalHorizontalAngles, intensity, kind='linear') # kind could be {'linear', 'cubic', 'quintic'}\n interpolatedIntensity = fun(newVerticalAngles,newHorizontalAngles)\n return interpolatedIntensity"},"size":{"kind":"number","value":7030,"string":"7,030"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":47.489654844898936,"string":"47.489655"},"max_line_length":{"kind":"number","value":198,"string":"198"},"alphanum_fraction":{"kind":"number","value":0.6610241819827846,"string":"0.661024"}}},{"rowIdx":9425,"cells":{"file_path":{"kind":"string","value":"XiaomingY/omni-ies-viewer/exts/IESViewer/config/extension.toml"},"content":{"kind":"string","value":"[package]\n# Semantic Versionning is used: https://semver.org/\nversion = \"1.0.0\"\nauthors = [\"Xiaoming Yang\"]\n# The title and description fields are primarily for displaying extension info in UI\ntitle = \"IES Viewer For Display IES Light Profiles\"\ndescription=\"This extension displays IES profiles for selected light objects.\"\n\n# Path (relative to the root) or content of readme markdown file for UI.\nreadme = \"docs/README.md\"\n\n# URL of the extension source repository.\nrepository = \"https://github.com/XiaomingY/omni-ies-viewer\"\n\n# One of categories for UI.\ncategory = \"Lighting\"\n\n# Keywords for the extension\nkeywords = [\"Lighting\", \"IES\"]\nchangelog = \"docs/CHANGELOG.md\"\npreview_image = \"data/preview.png\"\nicon = \"data/icon.png\"\n\n# Use omni.ui to build simple UI\n[dependencies]\n\"omni.ui.scene\" = { }\n\"omni.usd\" = { }\n\"omni.kit.viewport.utility\" = { }\n\n# Main python module this extension provides, it will be publicly available as \"import AimingTool\".\n[[python.module]]\nname = \"IESViewer\"\n"},"size":{"kind":"number","value":993,"string":"993"},"lang":{"kind":"string","value":"TOML"},"avg_line_length":{"kind":"number","value":29.121211238751176,"string":"29.121211"},"max_line_length":{"kind":"number","value":99,"string":"99"},"alphanum_fraction":{"kind":"number","value":0.732124873381546,"string":"0.732125"}}},{"rowIdx":9426,"cells":{"file_path":{"kind":"string","value":"XiaomingY/omni-ies-viewer/exts/IESViewer/docs/README.md"},"content":{"kind":"string","value":"# IES Viewer Omniverse Extension\n\n\n\nThis extension displays IES profile web for selected light objects. It is particularly useful for visualizing architectural lighting designs. Orientation of measured light distribution profiles could be quickly tested with visual feedback. 
IES files are resampled to stay lightweight to render.\nThis extension is developed based on the [omni.example.ui_scene.object_info](https://github.com/NVIDIA-Omniverse/kit-extension-sample-ui-scene/tree/main/exts/omni.example.ui_scene.object_info)\n\nSupported light types: sphere light, rectangular light, disk light and cylinder light.\nOnly Type C IES files are supported currently, which is also the most common type for architectural lighting.\n\n## Adding This Extension\n\nTo add this extension to your Omniverse app:\n1. Go to Extension Manager and turn on the Viewport Utility extension\n2. Turn on the IES Viewer Extension\n"},"size":{"kind":"number","value":910,"string":"910"},"lang":{"kind":"string","value":"Markdown"},"avg_line_length":{"kind":"number","value":55.937496503906466,"string":"55.937497"},"max_line_length":{"kind":"number","value":293,"string":"293"},"alphanum_fraction":{"kind":"number","value":0.8076923068047337,"string":"0.807692"}}},{"rowIdx":9427,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/README.md"},"content":{"kind":"string","value":"# RTX Remix Tools [ekozerski.rtxremixtools]\n\nFocusing on improving RTX Remix modding workflows, this extension is designed to speed up iteration when producing assets and mods by providing useful UI operations inside Omniverse apps like USD Composer/Create or Code.\n\nIt provides options in the \"Right click\" context menu to set up ideal replacement assets, as well as some converting operations to ensure assets will be compatible with the Remix runtime.\n\n![Context Menu Options](exts/ekozerski.rtxremixtools/data/context_menu_options.png)\n\nIt is primarily designed to operate on Remix captured scenes, so users get instant feedback on what their mods will look like in the game scenes and can iterate faster.\n\n\n## Available Tools\n### Fix Meshes Geometry\n(The operation is performed on every mesh of a USD/USDA source file and can't be undone)\n\nInterpolation Mode\n- The RTX Remix runtime only supports meshes with \"vertex\" interpolation mode, in which the \"points\", \"normals\" and \"uvs\" arrays \nmust have the same length, but DCC tools usually export the mesh using \"faceVarying\" interpolation mode.\nThis operation reorganizes the geometry to be compatible with the runtime (a short sketch of the idea is shown at the end of this section).\n- See: \"Interpolation of Geometric Primitive Variables\" - https://openusd.org/dev/api/class_usd_geom_primvar.html\n- This operation only applies to meshes inside the mods folder, not the captured ones.\n\nUV Maps\n- The runtime supports a single UV map per mesh, which should have one of a few known names, so this script finds many variations, picks one and renames it to the standard \"primvars:st\", while also setting the appropriate type, \"TextureCoordinate\" (TexCoord2fArray / TexCoord2f[]). The other UV maps are discarded.\n\nUnused Primvars\n- displayColor and displayOpacity are removed from the mesh.\n\n
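A minimal sketch of what the Interpolation Mode fix does, assuming a triangulated UsdGeom.Mesh whose normals and UVs were exported as faceVarying (the function name here is illustrative; the full version is `convert_mesh_to_vertex_interpolation_mode` in `mesh_utils.py`, which also re-indexes primvars that were stored per point):\n\n```python\nfrom pxr import UsdGeom\n\n\ndef flatten_to_vertex_interpolation(mesh: UsdGeom.Mesh):\n    prim = mesh.GetPrim()\n    indices = prim.GetAttribute('faceVertexIndices').Get()\n\n    # Duplicate the points so every face-vertex owns one entry. faceVarying\n    # normals and UVs already store one value per face-vertex.\n    points_attr = prim.GetAttribute('points')\n    points = points_attr.Get()\n    points_attr.Set([points[i] for i in indices])\n\n    # All arrays now share the same length, so the index buffer becomes 0..N-1\n    # and every primvar can be declared with 'vertex' interpolation.\n    prim.GetAttribute('faceVertexIndices').Set(list(range(len(indices))))\n    for primvar in UsdGeom.PrimvarsAPI(prim).GetPrimvars():\n        primvar.SetInterpolation(UsdGeom.Tokens.vertex)\n    mesh.SetNormalsInterpolation(UsdGeom.Tokens.vertex)\n```\n\n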
### Setup for Mesh Replacement\nExports the selected mesh to a chosen path, already setting up the replacement references to work in the runtime, so for every change the user only needs to:\n- Open the exported mesh in its DCC of choice, make the changes and export again (with the right settings, triangulating faces, no materials, etc.)\n- Back in OV, refresh the reference to see the changes in the captured scene.\n- Use \"Fix Meshes Geometry\" again to make it Remix-compatible.\n- Enjoy.\n\nThe original mesh is kept in case the user only wants to add more models. Make sure to delete it if the intention is to completely replace the original mesh.\n\n### Add Model\nIf the user already has authored USD models, this option lets them select multiple models and add them to the mesh_HASH prim.\n\n### Add Material\nThis option lets the user select a material .MDL file (AperturePBR_Opacity.mdl or AperturePBR_Translucent.mdl) and adds a material prim to the mesh_HASH prim.\n\n### Original Draw Call Preservation\nSets the \"custom int preserveOriginalDrawCall\" attribute, which indicates whether the runtime should be forced to keep rendering the original mesh. It must be set to 1 when placing custom lights, or else the original mesh disappears. Remember to set it back to 0 if you want to make a mesh replacement and remove the original mesh.\n\n### Select Source Mesh\nQuick way to select the original source mesh_HASH prim in the scene when you have an instance prim selected.\n\n \n\n## Things to Keep in mind\n- In a capture scene, any changes made to the \"inst_SOMEHASH_x\" prims won't show up in the runtime, so every change must be made in the \"mesh_SOMEHASH\" they're referencing. Whenever the user clicks an inst_ prim to perform an action like Fix Meshes Geometry or Add Model (e.g. right-clicking in the 3D viewport), this tool will try to find the referenced mesh_SOMEHASH and perform the operations on it instead.\n- With that in mind, always keep an eye on the \"Layers\" tab to check whether you have made any changes to the \"instances\" path, and try to delete those changes as much as possible.\n- The only material types that work in the runtime are described in the AperturePBR_Opacity.MDL and AperturePBR_Translucent.MDL, and every mesh must be triangulated. If you want to add a model you got from somewhere else, like an asset store, make sure to convert the assets to work in the runtime.\n- When placing lights in the scene, it is necessary to set an int \"preserveOriginalDrawCall\" to \"1\" in order to keep rendering the original mesh. If another layer is setting this flag somewhere and you want to replace/remove the original mesh in your own layer, you will notice that the original mesh can't be removed without setting this flag back to \"0\". 
You can do that on your own layer, set it back to \"0\", but make sure your layer comes on top of the other one that sets it to true.\n"},"size":{"kind":"number","value":4604,"string":"4,604"},"lang":{"kind":"string","value":"Markdown"},"avg_line_length":{"kind":"number","value":79.78947228439523,"string":"79.789472"},"max_line_length":{"kind":"number","value":489,"string":"489"},"alphanum_fraction":{"kind":"number","value":0.7862728060846497,"string":"0.786273"}}},{"rowIdx":9428,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/tools/scripts/link_app.py"},"content":{"kind":"string","value":"import argparse\nimport json\nimport os\nimport sys\n\nimport packmanapi\nimport urllib3\n\n\ndef find_omniverse_apps():\n http = urllib3.PoolManager()\n try:\n r = http.request(\"GET\", \"http://127.0.0.1:33480/components\")\n except Exception as e:\n print(f\"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\\nError: {e}\")\n sys.exit(1)\n\n apps = {}\n for x in json.loads(r.data.decode(\"utf-8\")):\n latest = x.get(\"installedVersions\", {}).get(\"latest\", \"\")\n if latest:\n for s in x.get(\"settings\", []):\n if s.get(\"version\", \"\") == latest:\n root = s.get(\"launch\", {}).get(\"root\", \"\")\n apps[x[\"slug\"]] = (x[\"name\"], root)\n break\n return apps\n\n\ndef create_link(src, dst):\n print(f\"Creating a link '{src}' -> '{dst}'\")\n packmanapi.link(src, dst)\n\n\nAPP_PRIORITIES = [\"code\", \"create\", \"view\"]\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"Create folder link to Kit App installed from Omniverse Launcher\")\n parser.add_argument(\n \"--path\",\n help=\"Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'\",\n required=False,\n )\n parser.add_argument(\n \"--app\", help=\"Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'\", required=False\n )\n args = parser.parse_args()\n\n path = args.path\n if not path:\n print(\"Path is not specified, looking for Omniverse Apps...\")\n apps = find_omniverse_apps()\n if len(apps) == 0:\n print(\n \"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers.\"\n )\n sys.exit(0)\n\n print(\"\\nFound following Omniverse Apps:\")\n for i, slug in enumerate(apps):\n name, root = apps[slug]\n print(f\"{i}: {name} ({slug}) at: '{root}'\")\n\n if args.app:\n selected_app = args.app.lower()\n if selected_app not in apps:\n choices = \", \".join(apps.keys())\n print(f\"Passed app: '{selected_app}' is not found. 
Specify one of the following found Apps: {choices}\")\n sys.exit(0)\n else:\n selected_app = next((x for x in APP_PRIORITIES if x in apps), None)\n if not selected_app:\n selected_app = next(iter(apps))\n\n print(f\"\\nSelected app: {selected_app}\")\n _, path = apps[selected_app]\n\n if not os.path.exists(path):\n print(f\"Provided path doesn't exist: {path}\")\n else:\n SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))\n create_link(f\"{SCRIPT_ROOT}/../../app\", path)\n print(\"Success!\")\n"},"size":{"kind":"number","value":2814,"string":"2,814"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":32.11764668096886,"string":"32.117647"},"max_line_length":{"kind":"number","value":133,"string":"133"},"alphanum_fraction":{"kind":"number","value":0.5621890545265853,"string":"0.562189"}}},{"rowIdx":9429,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/tools/packman/config.packman.xml"},"content":{"kind":"string","value":"\n \n \n \n\n"},"size":{"kind":"number","value":211,"string":"211"},"lang":{"kind":"string","value":"XML"},"avg_line_length":{"kind":"number","value":34.33332761111206,"string":"34.333328"},"max_line_length":{"kind":"number","value":123,"string":"123"},"alphanum_fraction":{"kind":"number","value":0.691943124682734,"string":"0.691943"}}},{"rowIdx":9430,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/tools/packman/bootstrap/install_package.py"},"content":{"kind":"string","value":"# Copyright 2019 NVIDIA CORPORATION\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport shutil\nimport sys\nimport tempfile\nimport zipfile\n\n__author__ = \"hfannar\"\nlogging.basicConfig(level=logging.WARNING, format=\"%(message)s\")\nlogger = logging.getLogger(\"install_package\")\n\n\nclass TemporaryDirectory:\n def __init__(self):\n self.path = None\n\n def __enter__(self):\n self.path = tempfile.mkdtemp()\n return self.path\n\n def __exit__(self, type, value, traceback):\n # Remove temporary data created\n shutil.rmtree(self.path)\n\n\ndef install_package(package_src_path, package_dst_path):\n with zipfile.ZipFile(package_src_path, allowZip64=True) as zip_file, TemporaryDirectory() as temp_dir:\n zip_file.extractall(temp_dir)\n # Recursively copy (temp_dir will be automatically cleaned up on exit)\n try:\n # Recursive copy is needed because both package name and version folder could be missing in\n # target directory:\n shutil.copytree(temp_dir, package_dst_path)\n except OSError as exc:\n logger.warning(\"Directory %s already present, packaged installation aborted\" % package_dst_path)\n else:\n logger.info(\"Package successfully installed to %s\" % package_dst_path)\n\n\ninstall_package(sys.argv[1], 
sys.argv[2])\n"},"size":{"kind":"number","value":1844,"string":"1,844"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":33.16666605246915,"string":"33.166666"},"max_line_length":{"kind":"number","value":108,"string":"108"},"alphanum_fraction":{"kind":"number","value":0.7033622555838599,"string":"0.703362"}}},{"rowIdx":9431,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/add_model.py"},"content":{"kind":"string","value":"import os\nfrom pathlib import Path\nfrom typing import List\n\nimport omni\nfrom omni.client import make_relative_url\nfrom omni.kit.window.file_importer import get_file_importer\nfrom omni.kit.window.file_exporter import get_file_exporter\nimport omni.usd as usd\nfrom pxr import UsdGeom, Usd, Sdf\n\nfrom ekozerski.rtxremixtools.utils import find_inst_hash_prim, find_source_mesh_hash_prim\nfrom ekozerski.rtxremixtools.commons import log_info\nfrom ekozerski.rtxremixtools import mesh_utils\n\n\nclass UserCache:\n LAST_OPENED_MODEL = None\n\n\ndef open_export_dialog_for_captured_mesh(prim_path, mesh):\n def setup_references_in_stage(current_stage, reference_file_location):\n _, mesh_hash, __ = Usd.Prim.GetName(mesh.GetParent()).split('_')\n xform_prim_path = f'/RootNode/meshes/mesh_{mesh_hash}/Xform_{mesh_hash}_0'\n omni.kit.commands.execute('CreatePrim', prim_type='Xform', prim_path=xform_prim_path)\n\n editing_layer = current_stage.GetEditTarget().GetLayer()\n relative_file_path = make_relative_url(editing_layer.realPath, reference_file_location)\n omni.kit.commands.execute('AddReference',\n stage=current_stage,\n prim_path=Sdf.Path(xform_prim_path),\n reference=Sdf.Reference(relative_file_path)\n )\n selection = omni.usd.get_context().get_selection()\n selection.clear_selected_prim_paths()\n source_layer = mesh.GetPrimStack()[-1].layer\n source_layer.Reload()\n selection.set_selected_prim_paths([xform_prim_path], False)\n\n def file_export_handler(filename: str, dirname: str, extension: str = \"\", selections: List[str] = []):\n stage = Usd.Stage.CreateInMemory()\n root_xform = UsdGeom.Xform.Define(stage, '/root').GetPrim()\n stage.SetDefaultPrim(root_xform)\n new_mesh = UsdGeom.Mesh.Define(stage, f'/root/{prim_path.rsplit(\"/\", 1)[-1]}')\n\n needed_attr_names = ['doubleSided', 'extent', 'faceVertexCounts', 'faceVertexIndices', 'normals', 'points', 'primvars:st']\n [\n new_mesh.GetPrim().CreateAttribute(attr.GetName(), attr.GetTypeName()).Set(attr.Get())\n for attr in mesh.GetAttributes()\n if attr.Get() and attr.GetName() in needed_attr_names\n ]\n mesh_utils.convert_mesh_to_vertex_interpolation_mode(new_mesh)\n \n ctx = usd.get_context()\n current_stage = ctx.get_stage()\n upAxis = UsdGeom.GetStageUpAxis(current_stage)\n UsdGeom.SetStageUpAxis(stage, upAxis)\n\n save_location = dirname + filename + extension\n stage.Export(save_location)\n setup_references_in_stage(current_stage, save_location)\n \n log_info(f\"> Exporting {prim_path} in '{save_location}'\")\n\n source_layer = mesh.GetPrimStack()[-1].layer\n rtx_remix_path_parts = source_layer.realPath.split(os.path.join(\"rtx-remix\"), 1)\n if len(rtx_remix_path_parts) > 1:\n rtx_remix_path = os.path.join(rtx_remix_path_parts[0], \"rtx-remix\", \"mods\", \"gameReadyAssets\")\n else:\n rtx_remix_path = source_layer.realPath\n \n rtx_remix_path = os.path.join(rtx_remix_path, \"CustomMesh\")\n \n file_exporter = get_file_exporter()\n file_exporter.show_window(\n title=f'Export 
\"{prim_path}\"',\n export_button_label=\"Save\",\n export_handler=file_export_handler,\n filename_url=rtx_remix_path,\n )\n\n\ndef copy_original_mesh(prim_path, mesh, output_path):\n stage = Usd.Stage.CreateInMemory()\n root_xform = UsdGeom.Xform.Define(stage, '/root').GetPrim()\n stage.SetDefaultPrim(root_xform)\n new_mesh = UsdGeom.Mesh.Define(stage, f'/root/{prim_path.rsplit(\"/\", 1)[-1]}')\n\n needed_attr_names = ['doubleSided', 'extent', 'faceVertexCounts', 'faceVertexIndices', 'normals', 'points', 'primvars:st']\n [\n new_mesh.GetPrim().CreateAttribute(attr.GetName(), attr.GetTypeName()).Set(attr.Get())\n for attr in mesh.GetAttributes()\n if attr.Get() and attr.GetName() in needed_attr_names\n ]\n mesh_utils.convert_mesh_to_vertex_interpolation_mode(new_mesh)\n \n ctx = usd.get_context()\n current_stage = ctx.get_stage()\n upAxis = UsdGeom.GetStageUpAxis(current_stage)\n UsdGeom.SetStageUpAxis(stage, upAxis)\n\n stage.Export(output_path)\n\n\ndef setup_references_in_stage(mesh, current_stage, reference_file_location):\n inst_hash_prim = find_inst_hash_prim(mesh)\n _, mesh_hash, __ = Usd.Prim.GetName(inst_hash_prim).split('_')\n export_prim_name = os.path.basename(reference_file_location).split('.', 1)[0]\n xform_prim_path = f'/RootNode/meshes/mesh_{mesh_hash}/{export_prim_name}'\n omni.kit.commands.execute('CreatePrim', prim_type='Xform', prim_path=xform_prim_path)\n\n editing_layer = current_stage.GetEditTarget().GetLayer()\n relative_file_path = make_relative_url(editing_layer.realPath, reference_file_location)\n omni.kit.commands.execute('AddReference',\n stage=current_stage,\n prim_path=Sdf.Path(xform_prim_path),\n reference=Sdf.Reference(relative_file_path)\n )\n source_layer = mesh.GetPrimStack()[-1].layer\n source_layer.Reload()\n selection = omni.usd.get_context().get_selection()\n selection.clear_selected_prim_paths()\n selection.set_selected_prim_paths([xform_prim_path], False)\n\n\ndef open_export_dialog_for_captured_mesh(prim_path, mesh):\n def export_mesh(filename: str, dirname: str, extension: str = \"\", selections: List[str] = []):\n file_location = dirname + filename + extension\n copy_original_mesh(prim_path, mesh, file_location)\n ctx = usd.get_context()\n current_stage = ctx.get_stage()\n setup_references_in_stage(mesh, current_stage, file_location)\n \n source_layer = mesh.GetPrimStack()[-1].layer\n rtx_remix_path_parts = source_layer.realPath.split(os.path.join(\"rtx-remix\"), 1)\n rtx_remix_path = source_layer.realPath\n if len(rtx_remix_path_parts) > 1:\n rtx_remix_path = os.path.join(rtx_remix_path_parts[0], \"rtx-remix\", \"mods\", \"gameReadyAssets\")\n \n rtx_remix_path = os.path.join(rtx_remix_path, \"CustomMesh\")\n \n file_exporter = get_file_exporter()\n file_exporter.show_window(\n title=f'Export \"{prim_path}\"',\n export_button_label=\"Save\",\n export_handler=export_mesh,\n filename_url=rtx_remix_path,\n )\n\n\ndef open_import_dialog_for_add_models(prim_path):\n def import_mesh(filename: str, dirname: str, selections: List[str] = []):\n # TODO: Loop through all selections and add them all to the mesh_HASH with their respective xforms correctly named without collisions.\n mesh_path = mesh.GetPath().pathString\n new_selection = list()\n counter = 0\n for reference_file in selections:\n xform_name = Path(reference_file).stem\n new_mesh_path = mesh_path + f'/{xform_name}_{counter}'\n while current_stage.GetPrimAtPath(new_mesh_path).IsValid():\n counter += 1\n new_mesh_path = mesh_path + f'/{xform_name}_{counter}'\n\n 
omni.kit.commands.execute('CreatePrim', prim_type='Xform', prim_path=new_mesh_path)\n\n editing_layer = current_stage.GetEditTarget().GetLayer()\n relative_file_path = make_relative_url(editing_layer.realPath, reference_file)\n omni.kit.commands.execute('AddReference',\n stage=current_stage,\n prim_path=Sdf.Path(new_mesh_path),\n reference=Sdf.Reference(relative_file_path)\n )\n new_selection.append(new_mesh_path)\n UserCache.LAST_OPENED_MODEL = os.path.dirname(reference_file)\n counter += 1\n source_layer = mesh.GetPrimStack()[-1].layer\n source_layer.Reload()\n selection = omni.usd.get_context().get_selection()\n selection.clear_selected_prim_paths()\n selection.set_selected_prim_paths(new_selection, False)\n \n\n ctx = usd.get_context()\n current_stage = ctx.get_stage()\n inst_prim = current_stage.GetPrimAtPath(prim_path)\n mesh = find_source_mesh_hash_prim(current_stage, inst_prim)\n\n source_layer = mesh.GetPrimStack()[-1].layer\n filename_url = UserCache.LAST_OPENED_MODEL if UserCache.LAST_OPENED_MODEL is not None else source_layer.realPath\n\n file_importer = get_file_importer()\n file_importer.show_window(\n title=f'Import Models',\n import_button_label=\"Import\",\n import_handler=import_mesh,\n filename_url=filename_url,\n )\n\n\ndef open_add_model_dialog():\n for path in usd.get_context().get_selection().get_selected_prim_paths():\n open_import_dialog_for_add_models(path)\n\n\ndef open_mesh_replacement_setup_dialog():\n for path, mesh in mesh_utils.get_selected_mesh_prims().items():\n if mesh_utils.is_a_captured_mesh(mesh):\n open_export_dialog_for_captured_mesh(path, mesh)\n"},"size":{"kind":"number","value":8826,"string":"8,826"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":41.234449563471536,"string":"41.23445"},"max_line_length":{"kind":"number","value":142,"string":"142"},"alphanum_fraction":{"kind":"number","value":0.6607749829298918,"string":"0.660775"}}},{"rowIdx":9432,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/commons.py"},"content":{"kind":"string","value":"import carb\n\n\ndef log_info(msg: str):\n carb.log_info(f\"[RTX Remix Tool] {msg}\")\n\n\ndef log_warn(msg: str):\n carb.log_warn(f\"[RTX Remix Tool] {msg}\")\n\n\ndef log_error(msg: str):\n carb.log_error(f\"[RTX Remix Tool] {msg}\")\n"},"size":{"kind":"number","value":227,"string":"227"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":15.28571319387763,"string":"15.285713"},"max_line_length":{"kind":"number","value":45,"string":"45"},"alphanum_fraction":{"kind":"number","value":0.621145371713016,"string":"0.621145"}}},{"rowIdx":9433,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/extension.py"},"content":{"kind":"string","value":"import omni.ext\nimport omni.ui as ui\nfrom omni.kit import context_menu\nfrom omni.kit.hotkeys.core import get_hotkey_registry\nfrom omni.kit.actions.core import get_action_registry\n\nfrom . import commons\nfrom .rtx_context_menu import build_rtx_remix_menu\n\n\n# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be\n# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. 
Later when extension gets disabled\n# on_shutdown() is called.\nclass RtxRemixTools(omni.ext.IExt):\n def on_startup(self, ext_id):\n self.ext_id = ext_id\n commons.log_info(f\"Starting Up\")\n\n menu = {\"name\": \"RTX Remix\", \"populate_fn\": build_rtx_remix_menu}\n self._context_menu_subscription = context_menu.add_menu(menu, \"MENU\", \"\")\n self.hotkey_registry = get_hotkey_registry()\n\n register_actions(self.ext_id)\n self.select_source_mesh_hotkey = self.hotkey_registry.register_hotkey(\n self.ext_id,\n \"SHIFT + F\",\n self.ext_id,\n \"select_source_mesh\",\n filter=None,\n )\n \n\n def on_shutdown(self):\n commons.log_info(f\"Shutting Down\")\n # remove event\n self._context_menu_subscription.release()\n self.hotkey_registry.deregister_hotkey(\n self.select_source_mesh_hotkey,\n )\n deregister_actions(self.ext_id)\n\n\ndef register_actions(extension_id):\n from . import select_source_mesh\n\n action_registry = get_action_registry()\n actions_tag = \"RTX Remix Tools Actions\"\n\n action_registry.register_action(\n extension_id,\n \"select_source_mesh\",\n select_source_mesh.select_source_meshes,\n display_name=\"Select Source Mesh\",\n description=\"Selects the corresponding mesh_HASH the prim is related to.\",\n tag=actions_tag,\n )\n\n\ndef deregister_actions(extension_id):\n action_registry = get_action_registry()\n action_registry.deregister_all_actions_for_extension(extension_id)\n\n"},"size":{"kind":"number","value":2043,"string":"2,043"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":31.444443945326288,"string":"31.444444"},"max_line_length":{"kind":"number","value":118,"string":"118"},"alphanum_fraction":{"kind":"number","value":0.664708761299702,"string":"0.664709"}}},{"rowIdx":9434,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/__init__.py"},"content":{"kind":"string","value":"from .extension import *\n"},"size":{"kind":"number","value":25,"string":"25"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":11.999994000003,"string":"11.999994"},"max_line_length":{"kind":"number","value":24,"string":"24"},"alphanum_fraction":{"kind":"number","value":0.7599999696000012,"string":"0.76"}}},{"rowIdx":9435,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/mesh_utils.py"},"content":{"kind":"string","value":"from collections import OrderedDict\nimport os\nfrom pxr import UsdGeom, Usd, Sdf\nimport omni.usd as usd\n\nfrom ekozerski.rtxremixtools.commons import log_error\n\n\ndef get_selected_mesh_prims():\n ctx = usd.get_context()\n current_stage = ctx.get_stage()\n selection = ctx.get_selection().get_selected_prim_paths()\n selected_prims = {\n path: current_stage.GetPrimAtPath(path)\n for path in selection\n }\n meshes = {\n prim_path: prim\n for prim_path, prim in selected_prims.items()\n if UsdGeom.Mesh(prim)\n }\n\n return meshes\n\n\ndef convert_mesh_to_vertex_interpolation_mode(mesh):\n \"\"\"\n This method attemps to convert Remix meshes' interpolation mode from constant or faceVarying to vertex.\n If there is any faceVarying attribute, it means the data arrays (points, uvs, normals...) 
will have different\n lengths, so this script will copy data around using the faceVertexIndices array to ensure they all end up with the\n same length.\n \"\"\"\n # TODO: Study interpolation modes in depth to implement a decent conversion script.\n prim = mesh.GetPrim()\n primvar_api = UsdGeom.PrimvarsAPI(prim)\n primvars = {var for var in primvar_api.GetPrimvars()}\n face_varying_primvars = [v for v in primvars if v.GetInterpolation() == UsdGeom.Tokens.faceVarying]\n if face_varying_primvars or mesh.GetNormalsInterpolation() == UsdGeom.Tokens.faceVarying:\n non_face_varying_primvars = list(primvars.difference(face_varying_primvars))\n non_face_varying_primvars = [var for var in non_face_varying_primvars if var.GetInterpolation() != 'uniform']\n indices = prim.GetAttribute(\"faceVertexIndices\")\n\n # Settings points separately since it doesn't have a \"SetInterpolation\" like primvars.\n points = prim.GetAttribute(\"points\")\n points_arr = points.Get()\n new_arr = [points_arr[i] for i in indices.Get()]\n points.Set(new_arr)\n\n for var in non_face_varying_primvars:\n original_arr = var.Get()\n if original_arr:\n new_arr = [original_arr[i] for i in indices.Get()]\n var.Set(new_arr)\n \n indices.Set([i for i in range(len(indices.Get()))])\n \n [var.SetInterpolation(UsdGeom.Tokens.vertex) for var in primvars]\n mesh.SetNormalsInterpolation(UsdGeom.Tokens.vertex)\n\n\ndef convert_uv_primvars_to_st(mesh):\n # https://github.com/NVIDIAGameWorks/dxvk-remix/blob/ebb0ecfd638d6a32ab5f10708b5b07bc763cf79b/src/dxvk/rtx_render/rtx_mod_usd.cpp#L696\n # https://github.com/Kim2091/RTXRemixTools/blob/8ae25224ef8d1d284f3e208f671b2ce6a35b82af/RemixMeshConvert/For%20USD%20Composer/RemixMeshConvert_OV.py#L4\n known_uv_names = [\n 'primvars:st',\n 'primvars:uv',\n 'primvars:st0',\n 'primvars:st1',\n 'primvars:st2',\n 'primvars:UVMap',\n 'primvars:UVChannel_1',\n 'primvars:map1',\n ]\n # Preserving the order of found primvars to use the first one, in case a primvars:st can't be found.\n primvar_api = UsdGeom.PrimvarsAPI(mesh)\n uv_primvars = OrderedDict(\n (primvar.GetName(), primvar)\n for primvar in primvar_api.GetPrimvars()\n if primvar.GetTypeName().role == 'TextureCoordinate'\n or primvar.GetName() in known_uv_names\n )\n if not uv_primvars:\n return\n \n # Picking only one UV and blowing up everything else as the runtime only reads the first anyway.\n considered_uv = uv_primvars.get('primvars:st') or next(iter(uv_primvars.values()))\n uv_data = considered_uv.Get()\n [primvar_api.RemovePrimvar(uv_name) for uv_name in uv_primvars.keys()]\n\n # Recreating the primvar with appropriate name, type and role\n new_uv_primvar = primvar_api.CreatePrimvar('primvars:st', Sdf.ValueTypeNames.TexCoord2fArray, UsdGeom.Tokens.vertex)\n new_uv_primvar.Set(uv_data)\n\n\ndef remove_unused_primvars(mesh):\n unused_primvar_names = [\n 'primvars:displayColor',\n 'primvars:displayOpacity',\n ]\n primvar_api = UsdGeom.PrimvarsAPI(mesh)\n [primvar_api.RemovePrimvar(uv_name) for uv_name in unused_primvar_names]\n\n\ndef fix_meshes_in_file(usd_file_path):\n stage = Usd.Stage.Open(usd_file_path)\n mesh_prims = [prim for prim in stage.TraverseAll() if UsdGeom.Mesh(prim)]\n for prim in mesh_prims:\n faceVertices = prim.GetAttribute(\"faceVertexCounts\").Get()\n if not faceVertices or not all({x == 3 for x in faceVertices}):\n log_error(f\"Mesh {prim.GetPath()} in '{usd_file_path}' hasn't been triangulated and this tools doesn't do that for you yet :(\")\n continue\n convert_mesh_to_vertex_interpolation_mode(UsdGeom.Mesh(prim))\n 
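# Collapse whatever UV sets the DCC exported into a single 'primvars:st' TexCoord2fArray, the only UV map the Remix runtime reads.\n        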
convert_uv_primvars_to_st(UsdGeom.Mesh(prim))\n remove_unused_primvars(UsdGeom.Mesh(prim))\n\n stage.Save()\n\n\ndef is_a_captured_mesh(mesh):\n \"\"\"\n Returns True if the Mesh's defining USD file is located in the captures folder.\n \"\"\"\n return os.path.normpath(\"captures/meshes\") in os.path.normpath(mesh.GetPrimStack()[-1].layer.realPath)\n\n\n\ndef fix_meshes_geometry():\n meshes = {k: v for k,v in get_selected_mesh_prims().items() if not is_a_captured_mesh(v)}\n for path, mesh in meshes.items():\n source_layer = mesh.GetPrimStack()[-1].layer\n fix_meshes_in_file(source_layer.realPath)\n source_layer.Reload()\n"},"size":{"kind":"number","value":5330,"string":"5,330"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":39.08270647306236,"string":"39.082706"},"max_line_length":{"kind":"number","value":156,"string":"156"},"alphanum_fraction":{"kind":"number","value":0.6795497184466135,"string":"0.67955"}}},{"rowIdx":9436,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/add_material.py"},"content":{"kind":"string","value":"import os\nfrom typing import List\nfrom omni import usd, kit\nfrom omni.kit.window.file_importer import get_file_importer\nfrom omni.client import make_relative_url\n\nfrom ekozerski.rtxremixtools.utils import find_source_mesh_hash_prim\n\n\ndef open_add_material_dialog_for_prim(mesh_hash, ctx, current_stage):\n def create_material_from_mdl_file(filename: str, dirname: str, selections: List[str] = []):\n if not filename.endswith('mdl'):\n raise ValueError(f\"The selected file '{filename}' doesn't have a mdl extension.\")\n \n mesh_hash_path = mesh_hash.GetPath().pathString\n counter = 0\n material_name = os.path.basename(filename).replace('.mdl', '')\n new_material_path = mesh_hash_path + f'/{material_name}_{counter}'\n while current_stage.GetPrimAtPath(new_material_path).IsValid():\n counter += 1\n new_material_path = mesh_hash_path + f'/{material_name}_{counter}'\n\n # TODO: Get material name by inspecting the MDL file rather than guessing from it's name, so users can \n # rename it at will.\n mtl_name = 'AperturePBR_Opacity' if 'Opacity' in filename else 'AperturePBR_Translucent'\n editing_layer = current_stage.GetEditTarget().GetLayer()\n relative_file_path = make_relative_url(editing_layer.realPath, os.path.join(dirname, filename))\n success, _ = kit.commands.execute('CreateMdlMaterialPrimCommand',\n mtl_url=relative_file_path,\n mtl_name=mtl_name,\n mtl_path=new_material_path,\n select_new_prim=True,\n )\n\n def filter_handler(filename: str, _, extension_option):\n if extension_option == '.mdl':\n return filename.lower().endswith('.mdl')\n return True\n\n file_importer = get_file_importer()\n file_importer.show_window(\n title=f'Select MDL File',\n import_button_label=\"Select\",\n import_handler=create_material_from_mdl_file,\n file_extension_types=[(\".mdl\", \"Opacity or Translucent MDL file\")],\n file_filter_handler=filter_handler,\n )\n\n\ndef open_add_material_dialog():\n ctx = usd.get_context()\n current_stage = ctx.get_stage()\n selection = ctx.get_selection().get_selected_prim_paths()\n selected_prims = {\n path: current_stage.GetPrimAtPath(path)\n for path in selection\n }\n source_meshes = [find_source_mesh_hash_prim(current_stage, prim) for prim in selected_prims.values()]\n source_meshes = set([mesh for mesh in source_meshes if mesh is not None])\n\n for mesh_hash in list(source_meshes):\n 
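# One MDL file-picker dialog is opened per unique mesh_HASH prim; the selected MDL file is added as a child material prim of that mesh_HASH.\n        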
open_add_material_dialog_for_prim(mesh_hash, ctx, current_stage)\n"},"size":{"kind":"number","value":2650,"string":"2,650"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":41.079364427311674,"string":"41.079364"},"max_line_length":{"kind":"number","value":112,"string":"112"},"alphanum_fraction":{"kind":"number","value":0.6592452827700962,"string":"0.659245"}}},{"rowIdx":9437,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/utils.py"},"content":{"kind":"string","value":"from pxr import Usd\nfrom omni import usd\n\n\ndef find_source_mesh_hash_prim(current_stage, prim):\n if not current_stage.GetPrimAtPath('/RootNode/meshes'):\n return prim\n \n search_prim = prim\n valid_paths = ['/RootNode/meshes', '/RootNode/instances']\n while search_prim.GetParent().IsValid() and search_prim.GetParent().GetPath().pathString not in valid_paths:\n search_prim = search_prim.GetParent()\n \n if not search_prim:\n return None\n \n if 'mesh_' in Usd.Prim.GetName(search_prim):\n return search_prim\n\n _, mesh_hash, __ = Usd.Prim.GetName(search_prim).split('_')\n mesh_prim_path = f'/RootNode/meshes/mesh_{mesh_hash}'\n return current_stage.GetPrimAtPath(mesh_prim_path)\n \n\ndef find_inst_hash_prim(instance_mesh):\n search_prim = instance_mesh\n root_path = '/RootNode/instances'\n while search_prim.GetParent().IsValid() and search_prim.GetParent().GetPath().pathString != root_path:\n search_prim = search_prim.GetParent()\n\n if not search_prim:\n return None\n \n return search_prim\n"},"size":{"kind":"number","value":1075,"string":"1,075"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":29.74285629306125,"string":"29.742856"},"max_line_length":{"kind":"number","value":112,"string":"112"},"alphanum_fraction":{"kind":"number","value":0.6651162784510546,"string":"0.665116"}}},{"rowIdx":9438,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/preserve_draw_calls.py"},"content":{"kind":"string","value":"from omni import usd, kit\nfrom pxr import Sdf\n\nfrom ekozerski.rtxremixtools.utils import find_source_mesh_hash_prim\n\n\ndef set_preserve_original_draw_call(enabled: bool = False):\n ctx = usd.get_context()\n current_stage = ctx.get_stage()\n selection = ctx.get_selection().get_selected_prim_paths()\n selected_prims = {\n path: current_stage.GetPrimAtPath(path)\n for path in selection\n }\n\n source_meshes = [find_source_mesh_hash_prim(current_stage, prim) for prim in selected_prims.values()]\n source_meshes = set([mesh for mesh in source_meshes if mesh is not None])\n for mesh_prim in source_meshes:\n kit.commands.execute(\n 'CreateUsdAttributeCommand',\n prim=mesh_prim,\n attr_name='preserveOriginalDrawCall',\n attr_type=Sdf.ValueTypeNames.Int,\n attr_value=1 if enabled else 0\n )\n"},"size":{"kind":"number","value":880,"string":"880"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":32.88461411982253,"string":"32.884614"},"max_line_length":{"kind":"number","value":105,"string":"105"},"alphanum_fraction":{"kind":"number","value":0.6647727265173037,"string":"0.664773"}}},{"rowIdx":9439,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/rtx_context_menu.py"},"content":{"kind":"string","value":"from omni.kit.ui import get_custom_glyph_code\nfrom 
omni import usd\nimport omni.ui as ui\n\nfrom . import mesh_utils\nfrom . import add_model\nfrom . import add_material\nfrom . import preserve_draw_calls\nfrom . import select_source_mesh\n\n\ndef _build_fix_mesh_geometry_menu_item():\n tooltip = ''.join([\n 'Interpolation Mode\\n',\n 'OBS: Operation Can\\'t be undone\\n',\n ' RTX Remix runtime only supports \"vertex\" interpolation mode, in which \"points\", \"normals\" and \"uvs\" arrays ',\n 'must have the same length, but DCC tools usually export the mesh using \"faceVarying\" interpolation mode.',\n 'This operation reorganizes the geometry to be compatible with the runtime. See:\\n',\n ' \"Interpolation of Geometric Primitive Variables\" - https://openusd.org/dev/api/class_usd_geom_primvar.html',\n '\\n\\nThis operation only applies for meshes inside the mods folder, not the captured ones.',\n ])\n ui.MenuItem(\n \"Fix Meshes Geometry\",\n triggered_fn=mesh_utils.fix_meshes_geometry,\n enabled=any([\n not mesh_utils.is_a_captured_mesh(mesh)\n for mesh in mesh_utils.get_selected_mesh_prims().values()\n ]),\n tooltip=tooltip\n )\n\n\ndef _build_setup_for_mesh_replacements_menu_item():\n tooltip = ''.join([\n \"Export the original mesh to a selected location and setup the references to work within the runtime so you\",\n \" can focus on remodeling the mesh and export back at the same location.\"\n ])\n ui.MenuItem(\n \"Setup for Mesh Replacement\",\n triggered_fn=add_model.open_mesh_replacement_setup_dialog,\n enabled=any([\n mesh_utils.is_a_captured_mesh(mesh)\n for mesh in mesh_utils.get_selected_mesh_prims().values()\n ]),\n tooltip=tooltip\n )\n\n\ndef _build_add_model_menu_item():\n tooltip = ''.join([\n \"Add external authored meshes to the prim, setting up properly to work within the runtime.\"\n ])\n ui.MenuItem(\n \"Add Model\",\n triggered_fn=add_model.open_add_model_dialog,\n tooltip=tooltip,\n enabled=bool(usd.get_context().get_selection().get_selected_prim_paths())\n )\n\n\ndef _build_add_material_menu_item():\n tooltip = ''.join([\n \"Add a material defined from an external MDL file to the selected prim.\"\n ])\n ui.MenuItem(\n \"Add Material\",\n triggered_fn=add_material.open_add_material_dialog,\n tooltip=tooltip,\n enabled=bool(usd.get_context().get_selection().get_selected_prim_paths())\n )\n\n\ndef _build_preserve_original_draw_call_menu_item():\n tooltip = ''.join([\n \"Add a 'custom int preserveOriginalDrawCall' attribute set to '1' to the mesh_HASH prim. Used to indicate to\",\n \" the runtime whether it should keep rendering the original mesh or not. Should be set when adding custom \",\n \" lights without removing the original mesh from rendering.\"\n ])\n ui.MenuItem(\n \"Preserve\",\n triggered_fn=lambda: preserve_draw_calls.set_preserve_original_draw_call(True),\n tooltip=tooltip,\n enabled=bool(usd.get_context().get_selection().get_selected_prim_paths())\n )\n\n\ndef _build_dont_preserve_original_draw_call_menu_item():\n tooltip = ''.join([\n \"Add a 'custom int preserveOriginalDrawCall' attribute set to '0' to the mesh_HASH prim. Used to indicate to\",\n \" the runtime whether it should keep rendering the original mesh or not. 
Should be set when adding custom \",\n \" lights without removing the original mesh from rendering.\"\n ])\n ui.MenuItem(\n \"Don't Preserve\",\n triggered_fn=lambda: preserve_draw_calls.set_preserve_original_draw_call(False),\n tooltip=tooltip,\n enabled=bool(usd.get_context().get_selection().get_selected_prim_paths())\n )\n\n\ndef _build_select_source_meshes_menu():\n tooltip = ''.join([\n \"Selects the corresponding mesh_HASH the prim is related to.\"\n ])\n ui.MenuItem(\n \"Select Source Mesh (Shift + F)\",\n triggered_fn=select_source_mesh.select_source_meshes,\n tooltip=tooltip,\n enabled=bool(usd.get_context().get_selection().get_selected_prim_paths())\n )\n\n\ndef build_rtx_remix_menu(event):\n icon = get_custom_glyph_code(\"${glyphs}/menu_create.svg\")\n with ui.Menu(f' {icon} RTX Remix'):\n _build_fix_mesh_geometry_menu_item()\n _build_setup_for_mesh_replacements_menu_item()\n _build_add_model_menu_item()\n _build_add_material_menu_item()\n with ui.Menu(f'Original Draw Call Preservation'):\n _build_preserve_original_draw_call_menu_item()\n _build_dont_preserve_original_draw_call_menu_item()\n _build_select_source_meshes_menu()\n"},"size":{"kind":"number","value":4736,"string":"4,736"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":37.2016126032128,"string":"37.201613"},"max_line_length":{"kind":"number","value":122,"string":"122"},"alphanum_fraction":{"kind":"number","value":0.6528716214837686,"string":"0.652872"}}},{"rowIdx":9440,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/select_source_mesh.py"},"content":{"kind":"string","value":"from omni import usd\n\nfrom ekozerski.rtxremixtools.utils import find_source_mesh_hash_prim\n\n\ndef select_source_meshes():\n ctx = usd.get_context()\n current_stage = ctx.get_stage()\n selection = ctx.get_selection().get_selected_prim_paths()\n selected_prims = {\n path: current_stage.GetPrimAtPath(path)\n for path in selection\n }\n\n source_meshes = [find_source_mesh_hash_prim(current_stage, prim) for prim in selected_prims.values()]\n source_meshes = set([mesh for mesh in source_meshes if mesh is not None])\n paths = [mesh.GetPath().pathString for mesh in source_meshes]\n selection = usd.get_context().get_selection()\n selection.clear_selected_prim_paths()\n selection.set_selected_prim_paths(paths, False)\n"},"size":{"kind":"number","value":749,"string":"749"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":34.71428406122457,"string":"34.714284"},"max_line_length":{"kind":"number","value":105,"string":"105"},"alphanum_fraction":{"kind":"number","value":0.7049399189520161,"string":"0.70494"}}},{"rowIdx":9441,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/tests/__init__.py"},"content":{"kind":"string","value":"from .test_hello_world import *"},"size":{"kind":"number","value":31,"string":"31"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":30.999969000031,"string":"30.999969"},"max_line_length":{"kind":"number","value":31,"string":"31"},"alphanum_fraction":{"kind":"number","value":0.7741935234131121,"string":"0.774194"}}},{"rowIdx":9442,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/tests/test_hello_world.py"},"content":{"kind":"string","value":"# NOTE:\n# 
omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests\n# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html\nimport omni.kit.test\n\n# Extnsion for writing UI tests (simulate UI interaction)\nimport omni.kit.ui_test as ui_test\n\n# Import extension python module we are testing with absolute import path, as if we are external user (other extension)\nimport ekozerski.rtxremixtools\n\n\n# Having a test class dervived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test\nclass Test(omni.kit.test.AsyncTestCase):\n # Before running each test\n async def setUp(self):\n pass\n\n # After running each test\n async def tearDown(self):\n pass\n\n # Actual test, notice it is \"async\" function, so \"await\" can be used if needed\n @omni.kit.test.omni_test_registry(guid=\"f898a949-bacc-41f5-be56-b4eb8923f54e\")\n async def test_hello_public_function(self):\n result = ekozerski.rtxremixtools.some_public_function(4)\n self.assertEqual(result, 256)\n\n\n @omni.kit.test.omni_test_registry(guid=\"4626d574-659f-4a85-8958-9fa8588fbce3\")\n async def test_window_button(self):\n\n # Find a label in our window\n label = ui_test.find(\"My Window//Frame/**/Label[*]\")\n\n # Find buttons in our window\n add_button = ui_test.find(\"My Window//Frame/**/Button[*].text=='Add'\")\n reset_button = ui_test.find(\"My Window//Frame/**/Button[*].text=='Reset'\")\n\n # Click reset button\n await reset_button.click()\n self.assertEqual(label.widget.text, \"empty\")\n\n await add_button.click()\n self.assertEqual(label.widget.text, \"count: 1\")\n\n await add_button.click()\n self.assertEqual(label.widget.text, \"count: 2\")\n"},"size":{"kind":"number","value":1848,"string":"1,848"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":36.73469312786341,"string":"36.734693"},"max_line_length":{"kind":"number","value":142,"string":"142"},"alphanum_fraction":{"kind":"number","value":0.6915584411842216,"string":"0.691558"}}},{"rowIdx":9443,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/config/extension.toml"},"content":{"kind":"string","value":"[core]\nreloadable = true\n\n\n[package]\n# Semantic Versioning is used: https://semver.org/\nversion = \"0.0.2\"\n\n# Lists people or organizations that are considered the \"authors\" of the package.\nauthors = [\"Emanuel Kozerski\"]\n\n# The title and description fields are primarily for displaying extension info in UI\ntitle = \"RTX Remix Tools\"\ndescription=\"Simple toolkit for creating remixing assets compatible with RTX Remix runtime\"\n\n# Path (relative to the root) or content of readme markdown file for UI.\nreadme = \"docs/README.md\"\n\n# URL of the extension source repository.\nrepository = \"https://github.com/Ekozmaster/Nvidia-Omniverse-RTX-Remix-Tools\"\n\n# One of categories for UI.\ncategory = \"Other\"\n\n# Keywords for the extension\nkeywords = [\"Tool\", \"Toolkit\", \"Tools\", \"RTX\", \"Remix\"]\n\n# Location of change log file in target (final) folder of extension, relative to the root.\n# More info on writing changelog: https://keepachangelog.com/en/1.0.0/\nchangelog=\"docs/CHANGELOG.md\"\n\n# Preview image and icon. Folder named \"data\" automatically goes in git lfs (see .gitattributes file).\n# Preview image is shown in \"Overview\" of Extensions window. 
Screenshot of an extension might be a good preview image.\npreview_image = \"data/preview.png\"\n\n# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.\nicon = \"data/icon.png\"\n\n\n# Use omni.ui to build simple UI\n[dependencies]\n\"omni.kit.uiapp\" = {}\n\n\n# Main python module this extension provides, it will be publicly available as \"import ekozerski.rtxremixtools\".\n[[python.module]]\nname = \"ekozerski.rtxremixtools\"\n\n\n[[test]]\n# Extra dependencies only to be used during test run\ndependencies = [\n \"omni.kit.ui_test\" # UI testing extension\n]\n"},"size":{"kind":"number","value":1709,"string":"1,709"},"lang":{"kind":"string","value":"TOML"},"avg_line_length":{"kind":"number","value":30.090908543801664,"string":"30.090909"},"max_line_length":{"kind":"number","value":118,"string":"118"},"alphanum_fraction":{"kind":"number","value":0.7472205964030306,"string":"0.747221"}}},{"rowIdx":9444,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/docs/CHANGELOG.md"},"content":{"kind":"string","value":"# Changelog\n\nThe format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).\n\n## [0.0.3] - 2023-12-22\n- \"Add Model\", \"Add Material\" and \"Fix Mesh Geometry\" also works when not in a capture scene now.\n- Fixed somes errors when using \"Fix Mesh Geometry\" option in some meshes.\n- Added \"Shift + F\" hotkey to \"Select Source Mesh\".\n- Fixed error when using \"Setup for Mesh Replacement\" on captures which nests original game meshes inside a \"ref\" Xform.\n- Added convertion of many \"primvar:*\" name variations for UV-related primvars to \"primvars:st\" while discarding extra UV maps.\n- Removing unused primvars \"displayColor\" and \"displayOpacity\".\n- Xforms from added models and materials now are named according to the imported file rather than Xform_HASH_x\n\n## [0.0.2] - 2023-08-28\n- Fixing relative paths converted to absolute on the \"Fix Meshes Geometry\" function.\n- Picking best UV map available between all primvars and discarding everything else in the \"Fix Meshes Geometry\"\n- Removing unused primvars when using the \"Fix Meshes Geometry\".\n- Few more bugfixes.\n\n## [0.0.1] - 2023-08-25\n- Initial version\n- Added \"Fix Meshes Geometry\" option converting interpolation mode to \"vertex\".\n- Added \"Setup for Mesh Replacement\" option to export the original mesh for remodeling by external DCC tools.\n- Added \"Add Model\" option to add external authored .USD models to the mesh_HASH prim.\n- Added \"Add Material\" option to add MDL materials to the mesh_HASH prim.\n- Added \"Original Draw Call Preservation\" submenu to set.\n- Added \"Select Source Mesh\" option to quickly select the mesh_HASH prim.\n"},"size":{"kind":"number","value":1603,"string":"1,603"},"lang":{"kind":"string","value":"Markdown"},"avg_line_length":{"kind":"number","value":56.28571227551027,"string":"56.285712"},"max_line_length":{"kind":"number","value":127,"string":"127"},"alphanum_fraction":{"kind":"number","value":0.7585776664013864,"string":"0.758578"}}},{"rowIdx":9445,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/docs/README.md"},"content":{"kind":"string","value":"# RTX Remix Tools [ekozerski.rtxremixtools]\n\nFocusing on improving RTX Remix modding workflows, this extension is designed to speed up iteration when producing assets and mods by providing useful UI operations inside Omniverse apps like 
USD Composer/Create or Code.\n\nIt provides some options for the \"Right click\" context menu to setup ideal replacement assets, as well as some converting operations to ensure assets will be compatible with the Remix runtime.\n\nIt is primarily designed to operate on Remix captured scenes, so users can have instant feedbacks on what their mods are gonna look like in the game scenes and iterate faster.\n\n\n## Available Tools\n### Fix Meshes Geometry\n(Operation is performed on every mesh of a USD/USDA source file and can\\'t be undone)\n\nInterpolation Mode\n- RTX Remix runtime only supports meshes with \"vertex\" interpolation mode, in which \"points\" \"normals\" and \"uvs\" arrays \nmust have the same length, but DCC tools usually export the mesh using \"faceVarying\" interpolation mode.\nThis operation reorganizes the geometry to be compatible with the runtime.\n- See: \"Interpolation of Geometric Primitive Variables\" - https://openusd.org/dev/api/class_usd_geom_primvar.html\n- This operation only applies for meshes inside the mods folder, not the captured ones.\n\nUV Maps\n- The runtime supports one single UV map per mesh, which should have one of a few known names, so this script finds many variations, picks one and renames to the standard \"primvars:st\", while also setting the appropriate type as \"TextureCoordinate\" (TexCoord2fArray / TexCoord2f[]). The other UVmaps are discarded.\n\nUnused Primvars\n- displayColor and displayOpacity are now removed from the mesh.\n\n### Setup for Mesh Replacement\nExports the selected mesh in a selected path, already setting up the replacements and references to work in the runtime, so for every change the user only needs to:\n- Open the exported mesh in it's DCC of choice, make the changes and export again (with the right settings, triangulating faces, no materials, etc.)\n- Back in OV, refresh the reference to see the changes in the captured scene.\n- Use the \"Fix Meshes Geometry\" again to make it Remix-compatible.\n- Enjoy.\n\nThe original mesh is kept in case the user only wants to add more models. Make sure to delete it if the intention is to completely replace the original mesh.\n\n### Add Model\nIf the user already has authored USD models, this option allows to select multiple models and add to the mesh_HASH prim.\n\n### Add Material\nThis option allows to select a material .MDL file (AperturePBR_Opacity.mdl or AperturePBR_Translucent.mdl) to add a material prim to the mesh_HASH prim.\n\n### Original Draw Call Preservation\nAllows to set the \"custom int preserveOriginalDrawCall\" attribute to indicate whether the runtime should be forced to render the original mesh or not. Must be set to 1 when placing custom lights or else the original mesh disappears. PS: Remember to set this to 0 if you want to make a mesh replacement and remove the original mesh.\n\n### Select Source Mesh\nQuick way to select the originial source mesh_HASH prim in the scene when you have an instance prim selected.\n\n \n\n## Things to Keep in mind\n- In a capture scene, any changes made to the \"inst_SOMEHASH_x\" prims won't show up in the runtime, so every changes must be done in the \"mesh_SOMEHASH\" they're referencing. 
Whenever the user clicks a inst_ prim to perform an action like Fixing geometry or Add Model (Ex: Right clicking in the 3D viewport), this tool will try to find the referenced mesh_SOMEHASH and perform the operations in it instead.\n- Having that in mind, always keep an eye in the \"Layers\" tab to check if you have done any changes to the \"instances\" path. Try to delete those changes as much as possible.\n- The only material types that work in the runtime are described in the AperturePBR_Opacity.MDL and AperturePBR_Translucent.MDL, and every mesh must be triangulated. If you want to add a model you got from somewhere else like an asset store, make sure to convert the assets to work in the runtime.\n- When placing lights in the scene, it is necesssary to set an int \"preserveOriginalDrawCall\" to \"1\" in order to keep rendering the original mesh. If another layer is setting this flag somewhere and you want to replace/remove the original mesh in your own layer, you will notice that the original mesh can't be removed without setting this flag back to \"0\". You can do that on your own layer, set it back to \"0\", but make sure your layer comes on top of the other one that sets it to true.\n"},"size":{"kind":"number","value":4574,"string":"4,574"},"lang":{"kind":"string","value":"Markdown"},"avg_line_length":{"kind":"number","value":82.18181668760333,"string":"82.181817"},"max_line_length":{"kind":"number","value":489,"string":"489"},"alphanum_fraction":{"kind":"number","value":0.7868386530855184,"string":"0.786839"}}},{"rowIdx":9446,"cells":{"file_path":{"kind":"string","value":"Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/docs/index.rst"},"content":{"kind":"string","value":"ekozerski.rtxremixtools\n#############################\n\nExample of Python only extension\n\n\n.. toctree::\n :maxdepth: 1\n\n README\n CHANGELOG\n\n\n.. automodule::\"ekozerski.rtxremixtools\"\n :platform: Windows-x86_64, Linux-x86_64\n :members:\n :undoc-members:\n :show-inheritance:\n :imported-members:\n :exclude-members: contextmanager\n"},"size":{"kind":"number","value":347,"string":"347"},"lang":{"kind":"string","value":"reStructuredText"},"avg_line_length":{"kind":"number","value":15.571427829932007,"string":"15.571428"},"max_line_length":{"kind":"number","value":43,"string":"43"},"alphanum_fraction":{"kind":"number","value":0.6340057618616549,"string":"0.634006"}}},{"rowIdx":9447,"cells":{"file_path":{"kind":"string","value":"rcervellione-nv/omni.rhinocompute/CONTRIBUTING.md"},"content":{"kind":"string","value":"\n## Contribution Rules\n\n#### Issue Tracking\n\n* All enhancement, bugfix, or change requests must begin with the creation of a [TensorRT Issue Request](https://github.com/nvidia/TensorRT/issues).\n * The issue request must be reviewed by TensorRT engineers and approved prior to code review.\n\n\n#### Coding Guidelines\n\n- All source code contributions must strictly adhere to the [TensorRT Coding Guidelines](CODING-GUIDELINES.md).\n\n- In addition, please follow the existing conventions in the relevant file, submodule, module, and project when you add new code or when you extend/fix existing functionality.\n\n- To maintain consistency in code formatting and style, you should also run `clang-format` on the modified sources with the provided configuration file. 
This applies TensorRT code formatting rules to:\n - class, function/method, and variable/field naming\n - comment style\n - indentation\n - line length\n\n- Format git changes:\n ```bash\n # Commit ID is optional - if unspecified, run format on staged changes.\n git-clang-format --style file [commit ID/reference]\n ```\n\n- Format individual source files:\n ```bash\n # -style=file : Obtain the formatting rules from .clang-format\n # -i : In-place modification of the processed file\n clang-format -style=file -i -fallback-style=none \n ```\n\n- Format entire codebase (for project maintainers only):\n ```bash\n find samples plugin -iname *.h -o -iname *.c -o -iname *.cpp -o -iname *.hpp \\\n | xargs clang-format -style=file -i -fallback-style=none\n ```\n\n- Avoid introducing unnecessary complexity into existing code so that maintainability and readability are preserved.\n\n- Try to keep pull requests (PRs) as concise as possible:\n - Avoid committing commented-out code.\n - Wherever possible, each PR should address a single concern. If there are several otherwise-unrelated things that should be fixed to reach a desired endpoint, our recommendation is to open several PRs and indicate the dependencies in the description. The more complex the changes are in a single PR, the more time it will take to review those changes.\n\n- Write commit titles using imperative mood and [these rules](https://chris.beams.io/posts/git-commit/), and reference the Issue number corresponding to the PR. Following is the recommended format for commit texts:\n```\n# - \n\n\n```\n\n- Ensure that the build log is clean, meaning no warnings or errors should be present.\n\n- Ensure that all `sample_*` tests pass prior to submitting your code.\n\n- All OSS components must contain accompanying documentation (READMEs) describing the functionality, dependencies, and known issues.\n\n - See `README.md` for existing samples and plugins for reference.\n\n- All OSS components must have an accompanying test.\n\n - If introducing a new component, such as a plugin, provide a test sample to verify the functionality.\n\n- To add or disable functionality:\n - Add a CMake option with a default value that matches the existing behavior.\n - Where entire files can be included/excluded based on the value of this option, selectively include/exclude the relevant files from compilation by modifying `CMakeLists.txt` rather than using `#if` guards around the entire body of each file.\n - Where the functionality involves minor changes to existing files, use `#if` guards.\n\n- Make sure that you can contribute your work to open source (no license and/or patent conflict is introduced by your code). You will need to [`sign`](#signing-your-work) your commit.\n\n- Thanks in advance for your patience as we review your contributions; we do appreciate them!\n\n\n#### Pull Requests\nDeveloper workflow for code contributions is as follows:\n\n1. Developers must first [fork](https://help.github.com/en/articles/fork-a-repo) the [upstream](https://github.com/nvidia/TensorRT) TensorRT OSS repository.\n\n2. Git clone the forked repository and push changes to the personal fork.\n\n ```bash\ngit clone https://github.com/YOUR_USERNAME/YOUR_FORK.git TensorRT\n# Checkout the targeted branch and commit changes\n# Push the commits to a branch on the fork (remote).\ngit push -u origin :\n ```\n\n3. 
Once the code changes are staged on the fork and ready for review, a [Pull Request](https://help.github.com/en/articles/about-pull-requests) (PR) can be [requested](https://help.github.com/en/articles/creating-a-pull-request) to merge the changes from a branch of the fork into a selected branch of upstream.\n * Exercise caution when selecting the source and target branches for the PR.\n Note that versioned releases of TensorRT OSS are posted to `release/` branches of the upstream repo.\n * Creation of a PR creation kicks off the code review process.\n * Atleast one TensorRT engineer will be assigned for the review.\n * While under review, mark your PRs as work-in-progress by prefixing the PR title with [WIP].\n\n4. Since there is no CI/CD process in place yet, the PR will be accepted and the corresponding issue closed only after adequate testing has been completed, manually, by the developer and/or TensorRT engineer reviewing the code.\n\n\n#### Signing Your Work\n\n* We require that all contributors \"sign-off\" on their commits. This certifies that the contribution is your original work, or you have rights to submit it under the same license, or a compatible license.\n\n * Any contribution which contains commits that are not Signed-Off will not be accepted.\n\n* To sign off on a commit you simply use the `--signoff` (or `-s`) option when committing your changes:\n ```bash\n $ git commit -s -m \"Add cool feature.\"\n ```\n This will append the following to your commit message:\n ```\n Signed-off-by: Your Name \n ```\n\n* Full text of the DCO:\n\n ```\n Developer Certificate of Origin\n Version 1.1\n \n Copyright (C) 2004, 2006 The Linux Foundation and its contributors.\n 1 Letterman Drive\n Suite D4700\n San Francisco, CA, 94129\n \n Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.\n ```\n\n ```\n Developer's Certificate of Origin 1.1\n \n By making a contribution to this project, I certify that:\n \n (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or\n \n (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or\n \n (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it.\n \n (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved.\n ```"},"size":{"kind":"number","value":7222,"string":"7,222"},"lang":{"kind":"string","value":"Markdown"},"avg_line_length":{"kind":"number","value":50.226949998390424,"string":"50.22695"},"max_line_length":{"kind":"number","value":387,"string":"387"},"alphanum_fraction":{"kind":"number","value":0.7545001383613265,"string":"0.7545"}}},{"rowIdx":9448,"cells":{"file_path":{"kind":"string","value":"rcervellione-nv/omni.rhinocompute/README.md"},"content":{"kind":"string","value":"# About\n\nThis is an extension designed to run in a Nvidia Omniverse 
application such as Create or Machinima. The extension creates a link to a Rhino.Compute Server [https://developer.rhino3d.com/guides/compute/] allowing you to run Rhino commands such as quad remesh or Grasshopper files.\n\nThis is designed to be a sample to extend. there are examples for using some basic rhino command like volume and quad remesh as well as running a Grasshopper script. Use this as a starting point to integrate your grasshopper scripts and functions directly into Omniverse and create the necessary UI elements. \n\n\n\n# Using It\n\n- \"app\" - It is a folder link to the location of your *Omniverse Kit* based app.\n- \"exts\" - is the folder where you add to extension search path. (Extension Manager -> Gear Icon -> Extension Search Path).\n\nOpen this folder using Visual Studio Code. It will suggest you install a few extensions that will make python experience better. \n\nLook for \"cerver.util.rhinocompute\" extension in extension manager inside Omniverse Create and enable it. Try applying changes to any python files, it will hot-reload and you can observe results immediately.\n\nThe first time you enable it will take some time to load. this is because all of the required packages from rhino and rhino compute will be installed into your Omniverse python library via a automatic pip install. \n\n# 3rd party Libraries\n\nThis project references 3rd party libraries with the following licensing \n\nRhino.compute\nhttps://github.com/mcneel/compute.rhino3d/blob/master/LICENSE\n\nRhino3dm\nhttps://github.com/mcneel/rhino3dm/blob/main/LICENSE \n\nPlotly\nhttps://github.com/plotly/plotly.py/blob/master/LICENSE.txt\n\n"},"size":{"kind":"number","value":1794,"string":"1,794"},"lang":{"kind":"string","value":"Markdown"},"avg_line_length":{"kind":"number","value":53.39393777594128,"string":"53.393938"},"max_line_length":{"kind":"number","value":309,"string":"309"},"alphanum_fraction":{"kind":"number","value":0.7959866216298848,"string":"0.795987"}}},{"rowIdx":9449,"cells":{"file_path":{"kind":"string","value":"rcervellione-nv/omni.rhinocompute/exts/cerver.util.rhinocompute/cerver/util/rhinocompute/extension.py"},"content":{"kind":"string","value":"# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.\n# NVIDIA CORPORATION and its licensors retain all intellectual property\n# and proprietary rights in and to this software, related documentation\n# and any modifications thereto. Any use, reproduction, disclosure or\n# distribution of this software and related documentation without an express\n# license agreement from NVIDIA CORPORATION is strictly prohibited.\n\n\n\nimport omni.ext\nimport omni.ui as ui\nimport omni.usd\nfrom .RhinoComputeFunctions import RhinoFunctions, GrasshopperFunctions\nfrom .RhinoComputUtil import SaveSelectedAs3dm\n\n# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be\n# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled\n# on_shutdown() is called.\nclass MyExtension(omni.ext.IExt):\n # ext_id is current extension id. 
It can be used with extension manager to query additional information, like where\n # this extension is located on filesystem.\n def __init__(self): \n self.computeUrl=\"http://localhost:6500/\"\n self.progressbarprog = 0\n self.progbarwindow = None\n self.excludeLastGroupAsLayer = False\n\n\n \n\n def on_startup(self, ext_id):\n #print(\"[omni.RhinoCompute] MyExtension startup\")\n def serverAddrChanged(addr):\n self.computeUrl = addr\n\n self._window = ui.Window(\"Rhino Compute Functions\", width=300, height=400)\n with self._window.frame:\n with ui.VStack():\n ui.Label(\"Compute Server Address\")\n serverAddrUi = ui.StringField(height = 30)\n serverAddrUi.model.set_value(self.computeUrl)\n serverAddrUi.model.add_value_changed_fn(lambda m:serverAddrChanged(m.get_value_as_string()))\n with ui.CollapsableFrame(\"Util Functions\", height = 0):\n with ui.VStack():\n ui.Button(\"save sel as 3dm\", clicked_fn=lambda: SaveSelectedAs3dm(self,\"S:/test.3dm\"), height=40)\n ui.Button(\"save all as 3dm\", clicked_fn=lambda: RhinoFunctions.SaveAllAs3DM_UI(self), height=40)\n with ui.CollapsableFrame(\"Mesh Functions\", height = 0):\n with ui.VStack():\n ui.Button(\"Volume\", clicked_fn=lambda: RhinoFunctions.MeshVolume(self), height=40)\n ui.Button(\"Mesh Bool Union\", clicked_fn=lambda: RhinoFunctions.MeshBoolUnion(self), height=40)\n ui.Button(\"Quad Remesh\", clicked_fn=lambda: RhinoFunctions.MeshQuadRemesh(self), height=40)\n ui.Button(\"Mesh Offset\", clicked_fn=lambda: RhinoFunctions.MeshOffset(self), height=40)\n with ui.CollapsableFrame(\"Grasshopper Functions\", height = 0):\n with ui.VStack():\n ui.Button(\"Random Diamonds Script\", clicked_fn=lambda: GrasshopperFunctions.randomDiamonds_UI(self), height=40)\n \n def on_shutdown(self):\n print(\"[omni.RhinoCompute] MyExtension shutdown\")\n"},"size":{"kind":"number","value":3121,"string":"3,121"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":51.91525335736859,"string":"51.915253"},"max_line_length":{"kind":"number","value":135,"string":"135"},"alphanum_fraction":{"kind":"number","value":0.6648510090788686,"string":"0.664851"}}},{"rowIdx":9450,"cells":{"file_path":{"kind":"string","value":"rcervellione-nv/omni.rhinocompute/exts/cerver.util.rhinocompute/cerver/util/rhinocompute/__init__.py"},"content":{"kind":"string","value":"from .extension import *\n"},"size":{"kind":"number","value":25,"string":"25"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":11.999994000003,"string":"11.999994"},"max_line_length":{"kind":"number","value":24,"string":"24"},"alphanum_fraction":{"kind":"number","value":0.7599999696000012,"string":"0.76"}}},{"rowIdx":9451,"cells":{"file_path":{"kind":"string","value":"rcervellione-nv/omni.rhinocompute/exts/cerver.util.rhinocompute/cerver/util/rhinocompute/RhinoComputeFunctions.py"},"content":{"kind":"string","value":"# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.\n# NVIDIA CORPORATION and its licensors retain all intellectual property\n# and proprietary rights in and to this software, related documentation\n# and any modifications thereto. 
Any use, reproduction, disclosure or\n# distribution of this software and related documentation without an express\n# license agreement from NVIDIA CORPORATION is strictly prohibited.\n\nimport string\n\nimport omni.ext\nimport omni.ui as ui\nfrom pxr import Usd, UsdGeom\nimport omni.usd\nimport carb.events\nimport omni.kit.app\nimport os\n\nimport json\nimport time\n\nomni.kit.pipapi.install(\"rhino3dm\")\nfrom rhino3dm import *\n\nomni.kit.pipapi.install(\"compute-rhino3d\")\nimport compute_rhino3d.Util\nimport compute_rhino3d.Mesh\nimport compute_rhino3d.Grasshopper as gh\nfrom .RhinoComputUtil import *\n\nomni.kit.pipapi.install(\"plotly==5.4.0\")\nimport plotly.graph_objects as go\n\n\n\nclass RhinoFunctions:\n\n def ComputeServerUrl(self):\n return self.computeUrl\n\n def MeshVolume(self):\n #add the compute server location\n compute_rhino3d.Util.url = self.computeUrl\n\n #convert selected items to rhino mesh\n meshes = convertSelectedUsdMeshToRhino()\n \n \n vols = []\n names = []\n rhinoMeshes = []\n\n #for each mesh compute the volume and then add the volume and name to a list\n for m in meshes:\n rhinoMeshes.append(m[\"Mesh\"])\n vol = compute_rhino3d.Mesh.Volume(m[\"Mesh\"])\n vols.append(vol)\n names.append(m[\"Name\"])\n \n #use plotly to plot the volumes as a pie chart\n fig = go.Figure(\n data=[go.Pie(values=vols, labels=names)],\n layout_title_text=\"the Volumes\"\n )\n fig.show()\n \n def MeshBoolUnion(self) -> None:\n #add the compute server location\n compute_rhino3d.Util.url = self.computeUrl\n \n #convert selected items to rhino mesh\n meshes = convertSelectedUsdMeshToRhino()\n\n #for each mesh compute the bool union\n rhinoMeshes = []\n for m in meshes:\n rhinoMeshes.append(m[\"Mesh\"])\n rhinoMeshes = compute_rhino3d.Mesh.CreateBooleanUnion(rhinoMeshes)\n \n #add to the stage after converting back from rhino to USD mesh\n #ToDo: add UI to define prim path and names\n ct=0\n for rm in rhinoMeshes:\n RhinoMeshToUsdMesh(\"/World/rhinoComputed/\",f\"BoolUnion_{ct}\",rm)\n \n def MeshQuadRemesh(self)-> None:\n compute_rhino3d.Util.url = self.computeUrl\n meshes = convertSelectedUsdMeshToRhino()\n\n #setup all the params for quad remesh\n #ToDo: make this a UI for user\n parameters = {\n 'AdaptiveQuadCount': True, \n 'AdaptiveSize': 50.0, \n 'DetectHardEdges': True, \n 'GuideCurveInfluence': 0, \n 'PreserveMeshArrayEdgesMode': 0, \n 'TargetQuadCount': 2000\n }\n names = []\n rhinoMeshes = []\n \n for m in meshes:\n weldVerts = compute_rhino3d.Mesh.Weld(m[\"Mesh\"],0.5)\n qrm =compute_rhino3d.Mesh.QuadRemesh(weldVerts,parameters)\n name = m[\"Name\"]\n if qrm is not None:\n rhinoMeshes.append(qrm)\n names.append(name)\n RhinoMeshToUsdMesh(\"/World/rhinoComputed/\",name+\"_QuadRemesh\",qrm)\n else:\n warning(f\"QuadRemesh Failed on {name}\")\n \n def MeshWeld(self, tol)-> None:\n compute_rhino3d.Util.url = self.computeUrl\n meshes = convertSelectedUsdMeshToRhino()\n\n names = []\n rhinoMeshes = []\n \n for m in meshes:\n weldVerts = compute_rhino3d.Mesh.Weld(m[\"Mesh\"],tol)\n name = m[\"Name\"]\n if weldVerts is not None:\n rhinoMeshes.append(weldVerts)\n names.append(name)\n RhinoMeshToUsdMesh(\"/World/rhinoComputed/\",name+\"_Weld\",weldVerts)\n else:\n warning(f\"Weld Failed on {name}\")\n \n def MeshOffset(self)-> None:\n compute_rhino3d.Util.url = self.computeUrl\n meshes = convertSelectedUsdMeshToRhino()\n\n names = []\n rhinoMeshes = []\n for m in meshes:\n macf = compute_rhino3d.Mesh.Offset1(m[\"Mesh\"],1,True)\n rhinoMeshes.append(macf)\n name = m[\"Name\"]\n 
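# Record the source mesh name and write the offset result back to the stage under /World/rhinoComputed.\n            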
names.append(name)\n RhinoMeshToUsdMesh(\"/World/rhinoComputed/\",name+\"_offset\",macf)\n\n \n\n def SaveAllAs3DM_UI(self):\n window_flags = ui.WINDOW_FLAGS_NO_SCROLLBAR\n #window_flags |= ui.WINDOW_FLAGS_NO_TITLE_BAR\n self.export3dmwindow = ui.Window(\"Export Stage As 3DM\", width=300, height=130, flags=window_flags)\n with self.export3dmwindow.frame: \n with ui.VStack():\n with ui.HStack():\n ui.Label(\"Path\", width=50, height = 25)\n path = ui.StringField( height = 25, tooltip = \"Set the location and name of the file i.e c:/temp/myRhinofile.3dm\")\n with ui.HStack( height = 35):\n def exLastGrpAsLayCb_changed(self, val):\n self.excludeLastGroupAsLayer = val\n print(val)\n \n exLastGrpAsLayCb = ui.CheckBox(width = 30)\n exLastGrpAsLayCb.model.add_value_changed_fn(lambda cb: exLastGrpAsLayCb_changed(self,cb.get_value_as_bool() ) )\n ui.Label(\"Exlude last group as layer\", width=50, height = 15)\n \n def exportbt():\n SaveAllas3DM(self,path.model.get_value_as_string())\n ui.Line()\n ui.Button(\"Export\", clicked_fn=lambda: exportbt(), height=25)\n\n\n\nclass GrasshopperFunctions:\n\n def randomDiamonds(self,uCt,vCt,rrA,rrB):\n compute_rhino3d.Util.url = self.computeUrl\n \n ghFile = os.path.dirname(os.path.dirname(__file__)) + \"/rhinocompute/gh/randomDiamonds.ghx\"\n selectedMeshes = convertSelectedUsdMeshToRhino()\n inputMesh = selectedMeshes[0][\"Mesh\"]\n \n\n # create list of input trees\n ghMesh = json.dumps(inputMesh.Encode())\n mesh_tree = gh.DataTree(\"baseMesh\")\n mesh_tree.Append([0], [ghMesh])\n\n srfU_tree = gh.DataTree(\"srfU\")\n srfU_tree.Append([0], [uCt])\n\n srfV_tree = gh.DataTree(\"srfV\")\n srfV_tree.Append([0], [vCt])\n\n rrA_tree = gh.DataTree(\"RR_A\")\n rrA_tree.Append([0], [rrA])\n\n rrB_tree = gh.DataTree(\"RR_B\")\n rrB_tree.Append([0], [rrB])\n\n\n inputs = [mesh_tree, srfU_tree, srfV_tree, rrA_tree, rrB_tree]\n\n results = gh.EvaluateDefinition(ghFile, inputs)\n \n \n # decode results\n \n data = results['values'][0]['InnerTree']['{0}']\n outMeshes = [rhino3dm.CommonObject.Decode(json.loads(item['data'])) for item in data]\n \n ct = 0\n for m in outMeshes:\n RhinoMeshToUsdMesh(\"/World\",f\"/randomDiamonds/randomDiamonds_{ct}\",m)\n ct+=1\n\n def randomDiamonds_UI(self):\n def run(uCt,vCt,rrA,rrB):\n GrasshopperFunctions.randomDiamonds(self,uCt, vCt, rrA,rrB)\n \n #window_flags = ui.WINDOW_FLAGS_NO_RESIZE\n sliderStyle = {\"border_radius\":15, \"background_color\": 0xFFDDDDDD, \"secondary_color\":0xFFAAAAAA, \"color\":0xFF111111, \"margin\":3}\n\n window_flags = ui.WINDOW_FLAGS_NO_SCROLLBAR\n self.theWindow = ui.Window(\"Random Diamonds\", width=300, height=200, flags=window_flags)\n with self.theWindow.frame:\n with ui.VStack():\n with ui.HStack():\n ui.Label(\"U Ct\", width=40)\n srfU = ui.IntSlider(height= 20, min=1, max=50, style= sliderStyle )\n with ui.HStack():\n ui.Label(\"V Ct\", width=40)\n srfV = ui.IntSlider(height= 20, min=1, max=50, style= sliderStyle )\n with ui.HStack():\n ui.Label(\"min D\", width=40)\n rrA = ui.FloatSlider(height= 20, min=0.1, max=150, style= sliderStyle )\n with ui.HStack():\n ui.Label(\"max D\", width=40)\n rrB = ui.FloatSlider(height= 20, min=0.1, max=150, style= sliderStyle )\n\n srfU.model.set_value(4)\n srfV.model.set_value(4)\n rrA.model.set_value(4)\n rrB.model.set_value(75)\n\n srfU.model.add_value_changed_fn(lambda m:run(srfU.model.get_value_as_int(),srfV.model.get_value_as_int(),rrA.model.get_value_as_float(),rrB.model.get_value_as_float()))\n srfV.model.add_value_changed_fn(lambda 
m:run(srfU.model.get_value_as_int(),srfV.model.get_value_as_int(),rrA.model.get_value_as_float(),rrB.model.get_value_as_float()))\n rrA.model.add_value_changed_fn(lambda m:run(srfU.model.get_value_as_int(),srfV.model.get_value_as_int(),rrA.model.get_value_as_float(),rrB.model.get_value_as_float()))\n rrB.model.add_value_changed_fn(lambda m:run(srfU.model.get_value_as_int(),srfV.model.get_value_as_int(),rrA.model.get_value_as_float(),rrB.model.get_value_as_float()))\n\n ui.Line(height=10)\n ui.Button(\"Run >>\", clicked_fn=lambda: GrasshopperFunctions.randomDiamonds(self, \n srfU.model.get_value_as_int(),\n srfV.model.get_value_as_int(),\n rrA.model.get_value_as_float(),\n rrB.model.get_value_as_float(),\n ), height=30)"},"size":{"kind":"number","value":9607,"string":"9,607"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":36.97628443882891,"string":"36.976284"},"max_line_length":{"kind":"number","value":184,"string":"184"},"alphanum_fraction":{"kind":"number","value":0.5846778390148144,"string":"0.584678"}}},{"rowIdx":9452,"cells":{"file_path":{"kind":"string","value":"rcervellione-nv/omni.rhinocompute/exts/cerver.util.rhinocompute/cerver/util/rhinocompute/RhinoComputUtil.py"},"content":{"kind":"string","value":"# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.\n# NVIDIA CORPORATION and its licensors retain all intellectual property\n# and proprietary rights in and to this software, related documentation\n# and any modifications thereto. Any use, reproduction, disclosure or\n# distribution of this software and related documentation without an express\n# license agreement from NVIDIA CORPORATION is strictly prohibited.\n\nimport compute_rhino3d.Util\nimport compute_rhino3d.Mesh\nimport compute_rhino3d.Grasshopper as gh\nimport rhino3dm\nimport json\nimport omni.ext\nimport omni.ui as ui\nfrom pxr import Usd, UsdGeom, Gf\nimport omni.usd\nimport asyncio\n\n\ndef convertSelectedUsdMeshToRhino():\n context = omni.usd.get_context()\n stage = omni.usd.get_context().get_stage()\n prims = [stage.GetPrimAtPath(m) for m in context.get_selection().get_selected_prim_paths() ]\n\n #filter out prims that are not mesh\n selected_prims = [\n prim for prim \n in prims\n if UsdGeom.Mesh(prim)]\n \n #setup var to hold the mesh, its name in the dict\n sDict = []\n\n #add the converted prims to the dict\n for m in selected_prims:\n sDict.append({\"Name\": m.GetName(), \"Mesh\":UsdMeshToRhinoMesh(m)})\n \n return sDict\n\ndef UsdMeshToRhinoMesh(usdMesh):\n #array for the mesh items\n vertices = []\n faces = []\n\n #get the USD points\n points = UsdGeom.Mesh(usdMesh).GetPointsAttr().Get()\n \n #setup the items needed to deal with world and local transforms\n xform_cache = UsdGeom.XformCache()\n mtrx_world = xform_cache.GetLocalToWorldTransform(usdMesh)\n\n #create the rhino mesh\n mesh = rhino3dm.Mesh()\n\n #convert the USD points to rhino points\n for p in points:\n world_p = mtrx_world.Transform(p)\n mesh.Vertices.Add(world_p[0],world_p[1],world_p[2]) \n \n #faces we can extend directly into the aray becaue they are just ints\n faces.extend( UsdGeom.Mesh(usdMesh).GetFaceVertexIndicesAttr().Get())\n faceCount = UsdGeom.Mesh(usdMesh).GetFaceVertexCountsAttr().Get()\n\n ct = 0\n #add the face verts, USD uses a flat list of ints so we need to deal with\n #3 or 4 sided faces. 
USD supports ngons but that is not accounted for\n #ToDo: Deal with ngons\n for i in range(0,len(faceCount)):\n fc=faceCount[i] \n if fc is 3:\n mesh.Faces.AddFace(faces[ct], faces[ct+1], faces[ct+2])\n if fc is 4:\n mesh.Faces.AddFace(faces[ct], faces[ct+1], faces[ct+2], faces[ct+3])\n ct+=fc\n \n #compute normals, i dont use the USD normals here but you could\n mesh.Normals.ComputeNormals()\n mesh.Compact()\n\n return mesh\n\ndef save_stage():\n stage = omni.usd.get_context().get_stage()\n stage.GetRootLayer().Save()\n omni.client.usd_live_process()\n \ndef RhinoMeshToUsdMesh( rootUrl, meshName, rhinoMesh: rhino3dm.Mesh , primPath=None):\n #get the stage\n stage = omni.usd.get_context().get_stage()\n\n \n\t# Create the geometry inside of \"Root\"\n meshPrimPath = rootUrl + meshName\n mesh = UsdGeom.Mesh.Define(stage, meshPrimPath)\n \n\t# Add all of the vertices\n points = []\n\n for i in range(0,len(rhinoMesh.Vertices)):\n v = rhinoMesh.Vertices[i]\n points.append(Gf.Vec3f(v.X, v.Y, v.Z))\n mesh.CreatePointsAttr(points)\n\n\n\t# Calculate indices for each triangle\n faceIndices = []\n faceVertexCounts = []\n \n \n for i in range(0, rhinoMesh.Faces.Count):\n fcount=3\n curf = rhinoMesh.Faces[i]\n\n faceIndices.append(curf[0])\n faceIndices.append(curf[1])\n faceIndices.append(curf[2])\n if curf[2] != curf[3]:\n faceIndices.append(curf[3])\n fcount=4\n #print(f\"{fcount} : {curf}\")\n faceVertexCounts.append(fcount)\n\n mesh.CreateFaceVertexIndicesAttr(faceIndices)\n mesh.CreateFaceVertexCountsAttr(faceVertexCounts)\n \n\t# Add vertex normals\n meshNormals = []\n for n in rhinoMesh.Normals:\n meshNormals.append(Gf.Vec3f(n.X,n.Y,n.Z))\n mesh.CreateNormalsAttr(meshNormals)\n \ndef SaveRhinoFile(rhinoMeshes, path):\n model = rhino3dm.File3dm()\n [ model.Objects.AddMesh(m) for m in rhinoMeshes]\n model.Write(path)\n\ndef SaveSelectedAs3dm(self,path):\n selectedMeshes = convertSelectedUsdMeshToRhino()\n meshobj = [d['Mesh'] for d in selectedMeshes]\n SaveRhinoFile(meshobj, path)\n\ndef SaveAllas3DM(self, path):\n #get the stage\n stage = omni.usd.get_context().get_stage()\n #get all prims that are meshes\n meshPrims = [stage.GetPrimAtPath(prim.GetPath()) for prim in stage.Traverse() if UsdGeom.Mesh(prim)]\n #make a rhino file\n rhinoFile = rhino3dm.File3dm()\n uniqLayers = {}\n #figure out how many elements there are (to implament progress bar in future)\n numPrims = len(meshPrims)\n curPrim = 0\n\n #loop over all the meshes\n for mp in meshPrims:\n #convert from usd mesh to rhino mesh\n rhinoMesh = UsdMeshToRhinoMesh(mp)\n objName = mp.GetName()\n rhinoAttr = rhino3dm.ObjectAttributes()\n \n\n dataOnParent = False \n #get the properties on the prim \n bimProps = None\n parentPrim = mp.GetParent()\n #see if this prim has BIM properties (from revit)\n if parentPrim:\n bimProps = mp.GetPropertiesInNamespace(\"BIM\")\n dataOnParent = False \n #see if this prims parent has BIM properties (from revit)\n if not bimProps:\n bimProps = parentPrim.GetPropertiesInNamespace(\"BIM\")\n dataOnParent = True \n #if no bim properties just add regular ones\n if not bimProps :\n bimProps = mp.GetProperties()\n dataOnParent = False \n\n for p in bimProps:\n try:\n pName = p.GetBaseName()\n var = p.Get()\n rhinoAttr.SetUserString(pName, str(var))\n except Exception :\n pass\n\n # get the prims path and use that to create nested layers in rhino\n primpath = str(mp.GetPath())\n sepPrimPath = primpath.split('/')\n sepPrimPath.pop(0)\n sepPrimPath.pop()\n # this will ajust the layer structure if the data is from the 
revit connector \n # or if you just want to prune the last group in the export dialogue \n if dataOnParent or self.excludeLastGroupAsLayer:\n sepPrimPath.pop()\n nestedLayerName = '::'.join(sepPrimPath)\n ct=0\n\n curLayer = \"\"\n #loop over all the prim paths to created the nested layers in rhino\n for pp in sepPrimPath:\n \n if ct == 0:\n curLayer += pp\n else:\n curLayer += f\"::{pp}\"\n \n #check if the layer exists, if not make it\n if not curLayer in uniqLayers :\n layer = rhino3dm.Layer()\n \n if ct>0:\n prevLayer = curLayer.split('::')\n prevLayer.pop()\n prevLayer = '::'.join(prevLayer)\n layer.ParentLayerId = rhinoFile.Layers.FindIndex(uniqLayers[prevLayer]).Id\n layer.Color = (255,255,255,255)\n layer.Name = pp\n idx = rhinoFile.Layers.Add(layer)\n uniqLayers[curLayer]= int(idx)\n ct+=1\n \n rhinoAttr.Name = objName\n #print(str(uniqLayers[nestedLayerName]))\n rhinoAttr.LayerIndex = int(str(uniqLayers[nestedLayerName]))\n \n #add the mesh and its attributes to teh rhino file\n rhinoFile.Objects.AddMesh(rhinoMesh, rhinoAttr)\n\n curPrim += 1\n self.progressbarprog = curPrim/numPrims\n\n #save it all\n rhinoFile.Write(path)\n print(\"completed saving\")\n\n\n\n\n\n\n\n"},"size":{"kind":"number","value":7771,"string":"7,771"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":30.98353896714593,"string":"30.983539"},"max_line_length":{"kind":"number","value":104,"string":"104"},"alphanum_fraction":{"kind":"number","value":0.625402136066735,"string":"0.625402"}}},{"rowIdx":9453,"cells":{"file_path":{"kind":"string","value":"rcervellione-nv/omni.rhinocompute/exts/cerver.util.rhinocompute/config/extension.toml"},"content":{"kind":"string","value":"[package]\n# Semantic Versionning is used: https://semver.org/\nversion = \"1.0.3\"\n\n# The title and description fields are primarily for displaying extension info in UI\ntitle = \"Rhino Compute for Omniverse\"\ndescription=\"Omniverse intergration with a rhino.compute server\"\n\n# Path (relative to the root) or content of readme markdown file for UI.\nreadme = \"../../README.md\"\n\n# URL of the extension source repository.\nrepository = \"https://github.com/rcervellione-nv/omni.rhinocompute\"\n\n# One of categories for UI.\ncategory = \"Utility\"\n\n# Keywords for the extension\nkeywords = [\"kit\", \"Rhino\", \"Compute\"]\n\n# Icon to show in the extension manager\nicon = \"data/computeTerminal.png\"\n\n# Preview to show in the extension manager\npreview_image = \"data/CreateAndCompute.png\"\n\n# Use omni.ui to build simple UI\n[dependencies]\n\"omni.kit.uiapp\" = {}\n\n# Main python module this extension provides, it will be publicly available as \"import omni.hello.world\".\n[[python.module]]\nname = \"cerver.util.rhinocompute\"\n"},"size":{"kind":"number","value":994,"string":"994"},"lang":{"kind":"string","value":"TOML"},"avg_line_length":{"kind":"number","value":28.26470505103809,"string":"28.264705"},"max_line_length":{"kind":"number","value":105,"string":"105"},"alphanum_fraction":{"kind":"number","value":0.7474849087047436,"string":"0.747485"}}},{"rowIdx":9454,"cells":{"file_path":{"kind":"string","value":"vinjn/llm-metahuman/README.md"},"content":{"kind":"string","value":"# LLM MetaHuman\n\nLLM MetaHuman is an open solution for AI-powered photorealistic digital humans.\n\n## Preparation steps\n\n- Install [Omniverse Launcher](https://www.nvidia.com/en-us/omniverse/download/)\n- Inside Omniverse Launcher, Install `Audio2Face`.\n- Install [Epic Games Store](https://store.epicgames.com/en-US/)\n- 
Inside the Epic Games Store, install Unreal Engine 5.x.
'/nvidia.audio2face.Audio2Face/PushAudio',\n audio2face__pb2.PushAudioRequest.SerializeToString,\n audio2face__pb2.PushAudioResponse.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n\n @staticmethod\n def PushAudioStream(request_iterator,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.stream_unary(request_iterator, target, '/nvidia.audio2face.Audio2Face/PushAudioStream',\n audio2face__pb2.PushAudioStreamRequest.SerializeToString,\n audio2face__pb2.PushAudioStreamResponse.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n"},"size":{"kind":"number","value":4130,"string":"4,130"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":40.309999596900006,"string":"40.31"},"max_line_length":{"kind":"number","value":120,"string":"120"},"alphanum_fraction":{"kind":"number","value":0.6547215494782757,"string":"0.654722"}}},{"rowIdx":9456,"cells":{"file_path":{"kind":"string","value":"vinjn/llm-metahuman/audio-client/gen_protoc.py"},"content":{"kind":"string","value":"import os\nimport subprocess\n\n\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\nproto_src_root = os.path.normpath(os.path.join(ROOT_DIR, \"proto/\"))\nproto_dst_root = os.path.normpath(os.path.join(ROOT_DIR, \".\"))\nproto_fpath = os.path.normpath(os.path.join(ROOT_DIR, \"proto\", \"audio2face.proto\"))\n\ncmd = [\n \"python\",\n \"-m\",\n \"grpc_tools.protoc\",\n \"-I\",\n f\"{proto_src_root}\",\n f\"--python_out={proto_dst_root}\",\n f\"--grpc_python_out={proto_dst_root}\",\n f\"{proto_fpath}\",\n]\n\nprint(cmd)\nsubprocess.call(cmd)\n"},"size":{"kind":"number","value":530,"string":"530"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":22.086955561436714,"string":"22.086956"},"max_line_length":{"kind":"number","value":83,"string":"83"},"alphanum_fraction":{"kind":"number","value":0.6339622629547882,"string":"0.633962"}}},{"rowIdx":9457,"cells":{"file_path":{"kind":"string","value":"vinjn/llm-metahuman/audio-client/test_client.py"},"content":{"kind":"string","value":"\"\"\"\nThis demo script shows how to send audio data to Audio2Face Streaming Audio Player via gRPC requests.\nThere are two options:\n * Send the whole track at once using PushAudioRequest()\n * Send the audio chunks seuqntially in a stream using PushAudioStreamRequest()\nFor the second option this script emulates the stream of chunks, generated by splitting an input WAV audio file.\nBut in a real application such stream of chunks may be aquired from some other streaming source:\n * streaming audio via internet, streaming Text-To-Speech, etc\ngRPC protocol details could be find in audio2face.proto\n\"\"\"\n\nimport sys\nimport time\n\nimport audio2face_pb2\nimport audio2face_pb2_grpc\nimport grpc\nimport numpy as np\nimport soundfile\n\n\ndef push_audio_track(url, audio_data, samplerate, instance_names):\n \"\"\"\n This function pushes the whole audio track at once via PushAudioRequest()\n PushAudioRequest parameters:\n * audio_data: bytes, containing audio data for the whole track, where each sample is encoded as 4 bytes (float32)\n * samplerate: sampling rate for the audio data\n * instance_names: prim path of the Audio2Face Streaming Audio Player on the stage, were to push the audio data\n * 
block_until_playback_is_finished: if True, the gRPC request will be blocked until the playback of the pushed track is finished\n The request is passed to PushAudio()\n \"\"\"\n\n block_until_playback_is_finished = True # ADJUST\n for instance_name in instance_names:\n with grpc.insecure_channel(url) as channel:\n stub = audio2face_pb2_grpc.Audio2FaceStub(channel)\n request = audio2face_pb2.PushAudioRequest()\n request.audio_data = audio_data.astype(np.float32).tobytes()\n request.samplerate = samplerate\n request.instance_name = instance_name\n request.block_until_playback_is_finished = block_until_playback_is_finished\n print(\"Sending audio data...\")\n response = stub.PushAudio(request)\n if response.success:\n print(\"SUCCESS\")\n else:\n print(f\"ERROR: {response.message}\")\n print(\"Closed channel\")\n\n\ndef push_audio_track_stream(url, audio_data, samplerate, instance_names):\n \"\"\"\n This function pushes audio chunks sequentially via PushAudioStreamRequest()\n The function emulates the stream of chunks, generated by splitting input audio track.\n But in a real application such stream of chunks may be aquired from some other streaming source.\n The first message must contain start_marker field, containing only meta information (without audio data):\n * samplerate: sampling rate for the audio data\n * instance_names: prim path of the Audio2Face Streaming Audio Player on the stage, were to push the audio data\n * block_until_playback_is_finished: if True, the gRPC request will be blocked until the playback of the pushed track is finished (after the last message)\n Second and other messages must contain audio_data field:\n * audio_data: bytes, containing audio data for an audio chunk, where each sample is encoded as 4 bytes (float32)\n All messages are packed into a Python generator and passed to PushAudioStream()\n \"\"\"\n\n chunk_size = samplerate // 10 # ADJUST\n sleep_between_chunks = 0.04 # ADJUST\n block_until_playback_is_finished = True # ADJUST\n\n with grpc.insecure_channel(url) as channel:\n print(\"Channel creadted\")\n stub = audio2face_pb2_grpc.Audio2FaceStub(channel)\n\n for instance_name in instance_names:\n def make_generator():\n start_marker = audio2face_pb2.PushAudioRequestStart(\n samplerate=samplerate,\n instance_name=instance_name,\n block_until_playback_is_finished=block_until_playback_is_finished,\n )\n # At first, we send a message with start_marker\n yield audio2face_pb2.PushAudioStreamRequest(start_marker=start_marker)\n # Then we send messages with audio_data\n for i in range(len(audio_data) // chunk_size + 1):\n time.sleep(sleep_between_chunks)\n chunk = audio_data[i * chunk_size : i * chunk_size + chunk_size]\n yield audio2face_pb2.PushAudioStreamRequest(audio_data=chunk.astype(np.float32).tobytes())\n\n request_generator = make_generator()\n print(\"Sending audio data...\")\n response = stub.PushAudioStream(request_generator)\n if response.success:\n print(\"SUCCESS\")\n else:\n print(f\"ERROR: {response.message}\")\n print(\"Channel closed\")\n\n\ndef main():\n \"\"\"\n This demo script shows how to send audio data to Audio2Face Streaming Audio Player via gRPC requests.\n There two options:\n * Send the whole track at once using PushAudioRequest()\n * Send the audio chunks seuqntially in a stream using PushAudioStreamRequest()\n For the second option this script emulates the stream of chunks, generated by splitting an input WAV audio file.\n But in a real application such stream of chunks may be aquired from some other streaming source:\n * 
streaming audio via internet, streaming Text-To-Speech, etc\n gRPC protocol details could be find in audio2face.proto\n \"\"\"\n\n if len(sys.argv) < 3:\n print(\"Format: python test_client.py PATH_TO_WAV INSTANCE_NAME\")\n return\n\n # Sleep time emulates long latency of the request\n sleep_time = 0.0 # ADJUST\n\n # URL of the Audio2Face Streaming Audio Player server (where A2F App is running)\n url = \"localhost:50051\" # ADJUST\n\n # Local input WAV file path\n audio_fpath = sys.argv[1]\n\n # Prim path of the Audio2Face Streaming Audio Player on the stage (were to push the audio data)\n instance_names = sys.argv[2:]\n\n data, samplerate = soundfile.read(audio_fpath, dtype=\"float32\")\n\n # Only Mono audio is supported\n if len(data.shape) > 1:\n data = np.average(data, axis=1)\n\n print(f\"Sleeping for {sleep_time} seconds\")\n time.sleep(sleep_time)\n\n if 0: # ADJUST\n # Push the whole audio track at once\n push_audio_track(url, data, samplerate, instance_names)\n else:\n # Emulate audio stream and push audio chunks sequentially\n push_audio_track_stream(url, data, samplerate, instance_names)\n\n\nif __name__ == \"__main__\":\n main()\n"},"size":{"kind":"number","value":6428,"string":"6,428"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":43.33793073560048,"string":"43.337931"},"max_line_length":{"kind":"number","value":158,"string":"158"},"alphanum_fraction":{"kind":"number","value":0.6832607341811977,"string":"0.683261"}}},{"rowIdx":9458,"cells":{"file_path":{"kind":"string","value":"vinjn/llm-metahuman/audio-client/llm.py"},"content":{"kind":"string","value":"from openai import OpenAI\nfrom pydub import AudioSegment\nimport gradio as gr\nimport requests\nimport os\nfrom litellm import completion\nimport time\nimport threading\nimport queue\nimport gradio_client as gc\n\n\n# XXX: increase requests speed\n# https://stackoverflow.com/a/72440253\nrequests.packages.urllib3.util.connection.HAS_IPV6 = False\n\nargs = None\n\nCWD = os.getcwd()\nprint(\"CWD:\", CWD)\n\nVOICE_ACTORS = [\"nova\", \"alloy\", \"echo\", \"fable\", \"onyx\", \"shimmer\"]\n\n\ndef timing_decorator(func):\n def wrapper(*args, **kwargs):\n start_time = time.time()\n result = func(*args, **kwargs)\n end_time = time.time()\n elapsed_time = end_time - start_time\n print(f\"{func.__name__} cost: {elapsed_time:.2f} seconds.\")\n return result\n\n return wrapper\n\n\nclass A2fInstance:\n files_to_delete = []\n instaces = []\n\n def __init__(self, index) -> None:\n self.SERVICE_HEALTHY = False\n self.LIVELINK_SERVICE_HEALTHY = False\n self.index = index\n\n @timing_decorator\n def post(self, end_point, data=None, verbose=True):\n if not self.SERVICE_HEALTHY:\n return None\n\n if verbose:\n print(f\"++ {end_point}\")\n api_url = f\"{self.base_url}/{end_point}\"\n try:\n response = requests.post(api_url, json=data)\n\n if response and response.status_code == 200:\n if verbose:\n print(response.json())\n return response.json()\n else:\n if verbose:\n print(f\"Error: {response.status_code} - {response.text}\")\n return {\"Error\": response.status_code, \"Reason\": response.text}\n except Exception as e:\n print(e)\n self.SERVICE_HEALTHY = False\n return None\n\n @timing_decorator\n def get(self, end_point, data=None, verbose=True):\n if not self.SERVICE_HEALTHY:\n return None\n\n if verbose:\n print(f\"++ {end_point}\")\n api_url = f\"{self.base_url}/{end_point}\"\n\n try:\n response = requests.get(api_url, json=data)\n if response.status_code == 200:\n if verbose:\n 
print(response.json())\n return response.json()\n else:\n if verbose:\n print(f\"Error: {response.status_code} - {response.text}\")\n return {\"Error\": response.status_code, \"Reason\": response.text}\n except Exception as e:\n print(e)\n self.SERVICE_HEALTHY = False\n return None\n\n def player_setlooping(self, flag=True):\n self.post(\n \"A2F/Player/SetLooping\",\n {\"a2f_player\": args.a2f_player_id, \"loop_audio\": flag},\n )\n\n def player_play(self):\n self.post(\"A2F/Player/Play\", {\"a2f_player\": args.a2f_player_id})\n\n def player_pause(self):\n self.post(\"A2F/Player/Pause\", {\"a2f_player\": args.a2f_player_id})\n\n def player_setrootpath(self, dir_path):\n self.post(\n \"A2F/Player/SetRootPath\",\n {\"a2f_player\": args.a2f_player_id, \"dir_path\": dir_path},\n )\n\n def player_settrack(self, file_name):\n self.post(\n \"A2F/Player/SetTrack\",\n {\"a2f_player\": args.a2f_player_id, \"file_name\": file_name},\n )\n\n def player_gettracks(self):\n self.post(\"A2F/Player/GetTracks\", {\"a2f_player\": args.a2f_player_id})\n\n def player_gettime(self):\n response = self.post(\n \"A2F/Player/GetTime\", {\"a2f_player\": args.a2f_player_id}, False\n )\n if response and response[\"status\"] == \"OK\":\n return response[\"result\"]\n else:\n return 0\n\n def player_getrange(self):\n response = self.post(\n \"A2F/Player/GetRange\", {\"a2f_player\": args.a2f_player_id}, False\n )\n if response and response[\"status\"] == \"OK\":\n return response[\"result\"][\"work\"]\n else:\n return (0, 0)\n\n def generatekeys(self):\n self.post(\"A2F/A2E/GenerateKeys\", {\"a2f_instance\": args.a2f_instance_id})\n\n def ActivateStreamLivelink(self, flag):\n self.post(\n \"A2F/Exporter/ActivateStreamLivelink\",\n {\"node_path\": args.a2f_livelink_id, \"value\": flag},\n )\n\n def IsStreamLivelinkConnected(self):\n response = self.post(\n \"A2F/Exporter/IsStreamLivelinkConnected\",\n {\"node_path\": args.a2f_livelink_id},\n )\n if response and response[\"status\"] == \"OK\":\n return response[\"result\"]\n else:\n return False\n\n def enable_audio_stream(self, flag):\n self.post(\n \"A2F/Exporter/SetStreamLivelinkSettings\",\n {\n \"node_path\": args.a2f_livelink_id,\n \"values\": {\"enable_audio_stream\": flag},\n },\n )\n\n def set_livelink_ports(\n self,\n livelink_host,\n livelink_subject,\n livelink_port,\n livelink_audio_port,\n ):\n self.post(\n \"A2F/Exporter/SetStreamLivelinkSettings\",\n {\n \"node_path\": args.a2f_livelink_id,\n \"values\": {\n \"livelink_host\": livelink_host,\n \"livelink_subject\": livelink_subject,\n \"livelink_port\": livelink_port,\n \"audio_port\": livelink_audio_port,\n },\n },\n )\n\n def get_preprocessing(self):\n response = self.post(\n \"A2F/PRE/GetSettings\",\n {\"a2f_instance\": args.a2f_instance_id},\n )\n if response and response[\"status\"] == \"OK\":\n return response[\"result\"]\n else:\n return {}\n\n def set_preprocessing(self, settings):\n settings[\"a2f_instance\"] = args.a2f_instance_id\n self.post(\"A2F/PRE/SetSettings\", settings)\n\n def get_postprocessing(self):\n response = self.post(\n \"A2F/POST/GetSettings\",\n {\"a2f_instance\": args.a2f_instance_id},\n )\n if response and response[\"status\"] == \"OK\":\n return response[\"result\"]\n else:\n return {}\n\n def set_postprocessing(self, settings):\n self.post(\n \"A2F/POST/SetSettings\",\n {\"a2f_instance\": args.a2f_instance_id, \"settings\": settings},\n )\n\n def setup(self):\n self.base_url = f\"http://{args.a2f_host}:{args.a2f_port+self.index}\"\n self.tts_voice = args.tts_voice\n if 
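                # On a 200 response the parsed JSON body is returned directly;
                # any other status code falls through to the error dictionary below.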
self.index > 0:\n # TODO: make it elegant\n self.tts_voice = VOICE_ACTORS[self.index % len(VOICE_ACTORS)]\n\n # always ping SERVICE_HEALTHY again in setup()\n self.SERVICE_HEALTHY = True\n\n self.ActivateStreamLivelink(True)\n if not self.SERVICE_HEALTHY:\n return\n\n self.player_setrootpath(CWD)\n self.player_setlooping(False)\n\n self.LIVELINK_SERVICE_HEALTHY = self.IsStreamLivelinkConnected()\n if not self.LIVELINK_SERVICE_HEALTHY:\n return\n\n self.enable_audio_stream(True)\n\n self.set_livelink_ports(\n args.livelink_host,\n f\"{args.livelink_subject}-{self.index}\",\n args.livelink_port + 10 * self.index,\n args.livelink_audio_port + 10 * self.index,\n )\n\n pre_settings = self.get_preprocessing()\n pre_settings[\"prediction_delay\"] = 0\n pre_settings[\"blink_interval\"] = 1.5\n self.set_preprocessing(pre_settings)\n\n post_settings = self.get_postprocessing()\n post_settings[\"skin_strength\"] = 1.3\n self.set_postprocessing(post_settings)\n\n\nA2fInstance.instaces = []\nopenai_client = OpenAI()\ngc_client: gc.Client = None\nchat_ui: gr.ChatInterface = None\n\n\ndef run_single_pipeline(a2f, answer, a2f_peer=None):\n global stop_current_a2f_play\n\n if not a2f_peer:\n a2f_peer = a2f\n\n # print(answer)\n mp3_file = text_to_mp3(answer, a2f.tts_voice)\n wav_file = mp3_to_wav(mp3_file)\n duration = a2f_peer.player_getrange()[1]\n position = a2f_peer.player_gettime()\n while position > 0 and position < duration:\n print(position, duration)\n if stop_current_a2f_play:\n print(\"stop_current_a2f_play\")\n stop_current_a2f_play = False\n return\n\n time.sleep(1)\n position = a2f_peer.player_gettime()\n print(\"z\")\n time.sleep(1)\n a2f.player_setrootpath(CWD)\n a2f.player_settrack(wav_file)\n # a2f_generatekeys()\n\n a2f.player_play()\n\n for file in A2fInstance.files_to_delete:\n try:\n os.remove(file)\n except Exception:\n pass\n A2fInstance.files_to_delete.clear()\n\n A2fInstance.files_to_delete.append(mp3_file)\n A2fInstance.files_to_delete.append(wav_file)\n\n\ncurrent_speaker = -1\n\n\n@timing_decorator\ndef run_pipeline(answer):\n if args.a2f_instance_count == 1:\n run_single_pipeline(A2fInstance.instaces[0], answer)\n return\n\n global current_speaker\n if answer.startswith(\"(\"):\n current_speaker = -1\n elif answer.startswith(\"A:\"):\n current_speaker = 0\n answer = answer[2:]\n elif answer.startswith(\"B:\"):\n current_speaker = 1\n answer = answer[2:]\n\n if current_speaker < 0 or current_speaker >= args.a2f_instance_count:\n return\n\n a2f = A2fInstance.instaces[current_speaker]\n if not a2f.SERVICE_HEALTHY:\n return\n\n run_single_pipeline(a2f, answer)\n\n\n@timing_decorator\ndef text_to_mp3(text, voice):\n response = openai_client.audio.speech.create(\n model=args.tts_model,\n voice=voice,\n speed=args.tts_speed,\n input=text,\n )\n timestamp = time.time()\n mp3_filename = f\"{timestamp}.mp3\"\n response.stream_to_file(mp3_filename)\n\n return mp3_filename\n\n\n@timing_decorator\ndef mp3_to_wav(mp3_filename):\n sound = AudioSegment.from_mp3(mp3_filename)\n sound = sound.set_frame_rate(22050)\n wav_filename = f\"{mp3_filename}.wav\"\n sound.export(wav_filename, format=\"wav\")\n\n return wav_filename\n\n\n@timing_decorator\ndef get_completion(chat_history):\n response = completion(\n model=args.llm_model,\n messages=chat_history,\n api_base=args.llm_url,\n stream=args.llm_streaming,\n )\n\n print(response)\n return response\n\n\nq = queue.Queue()\ncleanup_queue = False\nstop_current_a2f_play = False\n\n\ndef pipeline_worker():\n while True:\n 
print(\"--------------------------\")\n global cleanup_queue\n global stop_current_a2f_play\n if cleanup_queue:\n while not q.empty():\n item = q.get()\n q.task_done()\n\n if item == \"cleanup_queue_token\":\n break\n cleanup_queue = False\n stop_current_a2f_play = True\n\n item = q.get()\n if item == \"cleanup_queue_token\":\n continue\n\n print(f\"Begin: {item}\")\n run_pipeline(item)\n print(f\"End: {item}\")\n q.task_done()\n\n\ndef talk_to_peer(message):\n if not gc_client:\n return\n\n result = gc_client.predict(\n message, api_name=\"/chat\" # str in 'Message' Textbox component\n )\n print(f\"from peer: {result}\")\n\n # chat_ui.textbox.submit(None, [result, result])\n # chat_ui.textbox.submit()\n\n\ndef predict(message, history):\n print(\"==========================\")\n if message == \"setup\":\n str = \"\"\n for a2f in A2fInstance.instaces:\n a2f.setup()\n str += f\"A2F running: {a2f.SERVICE_HEALTHY}\\n\"\n str += f\"Live Link running: {a2f.LIVELINK_SERVICE_HEALTHY}\\n\"\n yield str\n return\n\n if message == \"ping\":\n for a2f in A2fInstance.instaces:\n a2f.post(\"\")\n a2f.get(\"\")\n yield \"A2F ping\"\n return\n\n if message == \"redo\":\n for a2f in A2fInstance.instaces:\n a2f.player_play()\n yield \"A2F redo\"\n return\n\n if message == \"stop\":\n global cleanup_queue\n cleanup_queue = True\n q.put(\"cleanup_queue_token\")\n yield \"stopped\"\n return\n\n if message.startswith(\"peer\"):\n items = message.split()\n if len(items) >= 2:\n gradio_port = int(items[1])\n # TODO: support non localhost\n args.gradio_peer_url = f\"http://{args.gradio_host}:{gradio_port}/\"\n global gc_client\n gc_client = gc.Client(args.gradio_peer_url)\n\n yield f\"I will chat with another llm-metahuman: {args.gradio_peer_url}\"\n return\n\n history_openai_format = []\n for human, assistant in history:\n history_openai_format.append({\"role\": \"user\", \"content\": human})\n history_openai_format.append({\"role\": \"assistant\", \"content\": assistant})\n history_openai_format.append({\"role\": \"user\", \"content\": message})\n\n # start_time = time.time()\n response = get_completion(history_openai_format)\n yield \"..\"\n\n # global cleanup_queue\n # cleanup_queue = True\n # q.put(\"cleanup_queue_token\")\n\n if args.llm_streaming:\n # create variables to collect the stream of chunks\n UNUSED_collected_chunks = []\n collected_messages = []\n complete_sentences = \"\"\n # iterate through the stream of events\n for chunk in response:\n # chunk_time = (\n # time.time() - start_time\n # ) # calculate the time delay of the chunk\n UNUSED_collected_chunks.append(chunk) # save the event response\n chunk_message = chunk.choices[0].delta.content # extract the message\n\n if not chunk_message:\n continue\n\n collected_messages.append(chunk_message) # save the message\n # print(\n # f\"Message {chunk_time:.2f} s after request: {chunk_message}\"\n # ) # print the delay and text\n print(chunk_message)\n\n if chunk_message in [\n \".\",\n \"!\",\n \"?\",\n \"。\",\n \"!\",\n \"?\",\n ] or chunk_message.endswith(\"\\n\"):\n # if not chunk_message or \"\\n\" in chunk_message:\n one_sentence = \"\".join([m for m in collected_messages if m is not None])\n if len(one_sentence) < 10:\n # ignore short sentences\n continue\n collected_messages = []\n complete_sentences += one_sentence\n q.put(one_sentence)\n # run_pipeline(one_sentence)\n\n yield complete_sentences\n\n talk_to_peer(one_sentence)\n\n # print the time delay and text received\n # print(f\"Full response received {chunk_time:.2f} seconds after 
request\")\n # # clean None in collected_messages\n # collected_messages = [m for m in collected_messages if m is not None]\n # full_reply_content = \"\".join([m for m in collected_messages])\n # print(f\"Full conversation received: {full_reply_content}\")\n # yield full_reply_content\n else:\n if len(response.choices[0].message.content) == 0:\n return\n\n answer = response.choices[0].message.content\n yield answer\n\n run_pipeline(answer)\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser(description=\"llm.py arguments\")\n\n # gradio settings\n parser.add_argument(\"--a2f_instance_count\", type=int, default=1)\n parser.add_argument(\"--gradio_host\", default=\"localhost\")\n parser.add_argument(\"--gradio_port\", type=int, default=7860)\n parser.add_argument(\n \"--gradio_peer_url\",\n default=None,\n help=\"the gradio peer that this gradio instance will chat with. Default value is None, which means chat with a human.\",\n )\n\n # llm / litellm settings\n parser.add_argument(\"--llm_engine\", default=\"gpt\", choices=[\"gpt\", \"llama2\"])\n parser.add_argument(\n \"--llm_model\", default=None, help=\"https://docs.litellm.ai/docs/providers\"\n )\n parser.add_argument(\"--llm_url\", default=None)\n parser.add_argument(\n \"--llm_streaming\", default=True, action=argparse.BooleanOptionalAction\n )\n\n # audio2face settings\n parser.add_argument(\"--a2f_host\", default=\"localhost\")\n parser.add_argument(\"--a2f_port\", default=8011, type=int)\n parser.add_argument(\"--a2f_instance_id\", default=\"/World/audio2face/CoreFullface\")\n parser.add_argument(\"--a2f_player_id\", default=\"/World/audio2face/Player\")\n parser.add_argument(\"--a2f_livelink_id\", default=\"/World/audio2face/StreamLivelink\")\n\n # tts settings\n parser.add_argument(\"--tts_model\", default=\"tts-1\", choices=[\"tts-1\", \"tts-1-hd\"])\n parser.add_argument(\"--tts_speed\", default=1.1, type=float)\n\n # livelink settings\n parser.add_argument(\"--livelink_host\", default=\"localhost\")\n parser.add_argument(\"--livelink_port\", default=12030, type=int)\n parser.add_argument(\"--livelink_subject\", default=\"Audio2Face\")\n parser.add_argument(\"--livelink_audio_port\", default=12031, type=int)\n\n parser.add_argument(\n \"--tts_voice\",\n default=\"nova\",\n choices=VOICE_ACTORS,\n help=\"https://platform.openai.com/docs/guides/text-to-speech\",\n )\n\n global args\n args = parser.parse_args()\n\n if not args.llm_model:\n if args.llm_engine == \"gpt\":\n args.llm_model = args.llm_model or \"gpt-3.5-turbo\"\n elif args.llm_engine == \"llama2\":\n args.llm_model = args.llm_model or \"ollama/llama2\"\n args.llm_url = args.llm_url or \"http://localhost:11434\"\n\n threading.Thread(target=pipeline_worker, daemon=True).start()\n\n for i in range(args.a2f_instance_count):\n a2f = A2fInstance(i)\n a2f.setup()\n A2fInstance.instaces.append(a2f)\n\n global chat_ui\n chat_ui = gr.ChatInterface(\n predict,\n title=f\"llm-metahuman @{args.gradio_port}\",\n examples=[\"hello\", \"tell me 3 jokes\", \"what's the meaning of life?\"],\n )\n\n chat_ui.queue().launch(server_name=args.gradio_host, server_port=args.gradio_port)\n\n q.join()\n\n\nif __name__ == \"__main__\":\n 
main()\n"},"size":{"kind":"number","value":18138,"string":"18,138"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":28.736065526662188,"string":"28.736066"},"max_line_length":{"kind":"number","value":127,"string":"127"},"alphanum_fraction":{"kind":"number","value":0.5733267173572981,"string":"0.573327"}}},{"rowIdx":9459,"cells":{"file_path":{"kind":"string","value":"vinjn/llm-metahuman/audio-client/ref/pytts-demo.py"},"content":{"kind":"string","value":"import pyttsx3\n\nengine = pyttsx3.init() # object creation\n\n\"\"\" RATE\"\"\"\nrate = engine.getProperty(\"rate\") # getting details of current speaking rate\nprint(rate) # printing current voice rate\nengine.setProperty(\"rate\", 125) # setting up new voice rate\n\n\n\"\"\"VOLUME\"\"\"\nvolume = engine.getProperty(\n \"volume\"\n) # getting to know current volume level (min=0 and max=1)\nprint(volume) # printing current volume level\nengine.setProperty(\"volume\", 1.0) # setting up volume level between 0 and 1\n\n\"\"\"VOICE\"\"\"\nvoices = engine.getProperty(\"voices\") # getting details of current voice\nprint(voices)\nengine.setProperty(\"voice\", voices[0].id) # changing index, changes voices. o for male\n# engine.setProperty('voice', voices[1].id) #changing index, changes voices. 1 for female\n\nengine.say(\"Hello World!\")\nengine.say(\"说什么 current speaking rate is \" + str(rate))\nengine.runAndWait()\nengine.stop()\n\n\"\"\"Saving Voice to a file\"\"\"\n# On linux make sure that 'espeak' and 'ffmpeg' are installed\nengine.save_to_file(\"Hello World\", \"test.mp3\")\nengine.runAndWait()\n"},"size":{"kind":"number","value":1054,"string":"1,054"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":30.969696031221336,"string":"30.969696"},"max_line_length":{"kind":"number","value":91,"string":"91"},"alphanum_fraction":{"kind":"number","value":0.7210626179117052,"string":"0.721063"}}},{"rowIdx":9460,"cells":{"file_path":{"kind":"string","value":"vinjn/llm-metahuman/audio-client/ref/minimal-chatbot.py"},"content":{"kind":"string","value":"import random\nimport gradio as gr\n\n\ndef alternatingly_agree(message, history):\n if len(history) % 2 == 0:\n return f\"Yes, I do think that '{message}'\"\n else:\n return \"I don't think so\"\n\n\ncount = 0\n\n\ndef textbox_update(chatui_textbox):\n global count\n count += 1\n if count % 10 == 0:\n return \"z\"\n else:\n return chatui_textbox\n\n\nif __name__ == \"__main__\":\n with gr.ChatInterface(alternatingly_agree) as chat_ui:\n chat_ui.textbox.change(\n textbox_update,\n chat_ui.textbox,\n chat_ui.textbox,\n every=1,\n trigger_mode=\"once\",\n )\n chat_ui.launch()\n"},"size":{"kind":"number","value":660,"string":"660"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":18.44117592820071,"string":"18.441176"},"max_line_length":{"kind":"number","value":58,"string":"58"},"alphanum_fraction":{"kind":"number","value":0.5545454537052341,"string":"0.554545"}}},{"rowIdx":9461,"cells":{"file_path":{"kind":"string","value":"vinjn/llm-metahuman/audio-client/ref/portal.py"},"content":{"kind":"string","value":"import gradio as gr\n\n\ndef task1(input_text):\n return \"Task 1 Result: \" + input_text\n\n\ndef task2(input_image):\n return \"Task 2 Result\"\n\n\ndef task3(input_image):\n return \"Task 2 Result\"\n\n\n# interface one\niface1 = gr.Interface(\n fn=task1, inputs=\"text\", outputs=\"text\", title=\"Multi-Page Interface\"\n)\n# interface two\niface2 = gr.Interface(\n fn=task2, inputs=\"image\", 
outputs=\"text\", title=\"Multi-Page Interface\"\n)\n\ntts_examples = [\n \"I love learning machine learning\",\n \"How do you do?\",\n]\n\n\ntts_demo = gr.load(\n \"huggingface/facebook/fastspeech2-en-ljspeech\",\n title=None,\n examples=tts_examples,\n description=\"Give me something to say!\",\n cache_examples=False,\n)\n\nstt_demo = gr.load(\n \"huggingface/facebook/wav2vec2-base-960h\",\n title=None,\n inputs=\"mic\",\n description=\"Let me try to guess what you're saying!\",\n)\n\n\ndemo = gr.TabbedInterface(\n [iface1, iface2, tts_demo, stt_demo],\n [\"Text-to-text\", \"image-to-text\", \"Text-to-speech\", \"Speech-to-text\"],\n)\n\n# Run the interface\ndemo.launch(share=True)\n"},"size":{"kind":"number","value":1054,"string":"1,054"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":18.53703669375858,"string":"18.537037"},"max_line_length":{"kind":"number","value":74,"string":"74"},"alphanum_fraction":{"kind":"number","value":0.666034154965812,"string":"0.666034"}}},{"rowIdx":9462,"cells":{"file_path":{"kind":"string","value":"vinjn/llm-metahuman/audio-client/ref/sine-curve.py"},"content":{"kind":"string","value":"import math\nimport gradio as gr\nimport plotly.express as px\nimport numpy as np\n\n\nplot_end = 2 * math.pi\n\n\ndef get_plot(period=1):\n global plot_end\n x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n y = np.sin(2*math.pi*period * x)\n fig = px.line(x=x, y=y)\n plot_end += 2 * math.pi\n if plot_end > 1000:\n plot_end = 2 * math.pi\n return fig\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n gr.Markdown(\"Change the value of the slider to automatically update the plot\")\n period = gr.Slider(label=\"Period of plot\", value=1, minimum=0, maximum=10, step=1)\n plot = gr.Plot(label=\"Plot (updates every half second)\")\n\n dep = demo.load(get_plot, None, plot, every=1)\n period.change(get_plot, period, plot, every=1, cancels=[dep])\n\n\nif __name__ == \"__main__\":\n demo.queue().launch()"},"size":{"kind":"number","value":871,"string":"871"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":25.42424165381086,"string":"25.424242"},"max_line_length":{"kind":"number","value":94,"string":"94"},"alphanum_fraction":{"kind":"number","value":0.6061997696828935,"string":"0.6062"}}},{"rowIdx":9463,"cells":{"file_path":{"kind":"string","value":"mnaskret/omni-tetGen/README.md"},"content":{"kind":"string","value":"# omni-tetGen\nAn omniverse extension to generate soft body meshes\n\n\n\n## Description:\n\nomni-tetGen uses the famous tetgen mesh generator developed by Hang Si to create tetrahedral and edge meshes for soft body simulation. The extension allows for a user-friendly drag-and-drop mechanism for input mesh data in standard .obj format. Then, it runs the python tetgen wrapper to create meshes which are converted to numpy arrays and described with additional infomration like edges rest lengths or tetrahedra volumes. Generated mesh is added to the stage with additional attributes:\n- edge\n- edgesRestLengths\n- elem\n- tetrahedronsRestVolumes\n- inverseMasses\n\n\n\n## PBD .ogn node\n\nAdditionally, an omniverse node with a simple Position Based Dynamics algorithm implementation with CUDA kernels is attached in order to test generated meshes.\n\n\n\n## Usage\n\n- [Install omniverse](https://www.nvidia.com/en-us/omniverse/) with e.g. 
the Create environment
asset_path=MyExtension.fileUrl,\n instanceable=True)\n prim = stage.GetPrimAtPath('/World/' + meshName + '/' + meshName + '/' + meshName)\n return prim\n\n def addAttributes(stage, prim, node, elem, face, edge, normals, colors, meshName):\n\n numberOfTris = int(face.shape[0] / 3)\n faceCount = np.full((numberOfTris), 3)\n\n mesh = pxr.PhysicsSchemaTools.createMesh(stage,\n pxr.Sdf.Path('/World/' + meshName + 'Mesh'),\n node.tolist(),\n normals.tolist(),\n face.tolist(),\n faceCount.tolist())\n\n newPrim = stage.GetPrimAtPath('/World/' + meshName + 'Mesh')\n\n velocitiesNP = np.zeros_like(node)\n inverseMasses = np.ones(len(node), dtype=float)\n edgesRestLengths = np.zeros(len(edge), dtype=float)\n tetrahedronsRestVolumes = np.zeros(len(elem), dtype=float)\n\n for i in range(len(edge)):\n edgesRestLengths[i] = np.linalg.norm(node[edge[i][0]] - node[edge[i][1]])\n\n for i in range(len(elem)):\n tetrahedronPositionA = node[elem[i][0]]\n tetrahedronPositionB = node[elem[i][1]]\n tetrahedronPositionC = node[elem[i][2]]\n tetrahedronPositionD = node[elem[i][3]]\n\n p1 = tetrahedronPositionB - tetrahedronPositionA\n p2 = tetrahedronPositionC - tetrahedronPositionA\n p3 = tetrahedronPositionD - tetrahedronPositionA\n\n volume = wp.dot(wp.cross(p1, p2), p3) / 6.0\n\n tetrahedronsRestVolumes[i] = volume\n\n velocitiesValue = pxr.Vt.Vec3fArray().FromNumpy(velocitiesNP)\n elemValue = pxr.Vt.Vec4iArray().FromNumpy(elem)\n edgeValue = pxr.Vt.Vec2iArray().FromNumpy(edge)\n edgesRestLengthsValue = pxr.Vt.FloatArray().FromNumpy(edgesRestLengths)\n inverseMassesValue = pxr.Vt.FloatArray().FromNumpy(inverseMasses)\n tetrahedronsRestVolumesValue = pxr.Vt.FloatArray().FromNumpy(tetrahedronsRestVolumes)\n\n elemAtt = newPrim.CreateAttribute('elem', Sdf.ValueTypeNames.Int4Array)\n edgeAtt = newPrim.CreateAttribute('edge', Sdf.ValueTypeNames.Int2Array)\n edgesRestLengthsAtt = newPrim.CreateAttribute('edgesRestLengths', Sdf.ValueTypeNames.FloatArray)\n inverseMassesAtt = newPrim.CreateAttribute('inverseMasses', Sdf.ValueTypeNames.FloatArray)\n tetrahedronsRestVolumesAtt = newPrim.CreateAttribute('tetrahedronsRestVolumes', Sdf.ValueTypeNames.FloatArray)\n\n velocitiesAtt = newPrim.GetAttribute('velocities')\n\n velocitiesAtt.Set(velocitiesValue)\n elemAtt.Set(elemValue)\n edgeAtt.Set(edgeValue)\n edgesRestLengthsAtt.Set(edgesRestLengthsValue)\n inverseMassesAtt.Set(inverseMassesValue)\n tetrahedronsRestVolumesAtt.Set(tetrahedronsRestVolumesValue)\n\n return mesh, newPrim\n\n def extractMeshDataToNP(prim):\n points = prim.GetAttribute('points').Get()\n faces = prim.GetAttribute('faceVertexIndices').Get()\n\n pointsNP = np.array(points, dtype=float)\n facesNP = np.array(faces, dtype=int)\n facesNP = facesNP.reshape((-1, 3))\n\n return pointsNP, facesNP\n\n def setPLC(self, value):\n self.PLC = value\n\n def setQuality(self, value):\n self.Quality = value\n\n def cross(a, b):\n c = [a[1]*b[2] - a[2]*b[1],\n a[2]*b[0] - a[0]*b[2],\n a[0]*b[1] - a[1]*b[0]]\n\n return c\n\n def calculateNormals(node, face):\n numberOfTris = int(face.shape[0] / 3)\n normals = np.empty_like(node)\n\n for i in range(numberOfTris):\n pIdA = face[i][0]\n pIdB = face[i][1]\n pIdC = face[i][2]\n\n pA = node[pIdA]\n pB = node[pIdB]\n pC = node[pIdC]\n\n vA = pB - pA\n vB = pC - pA\n normal = MyExtension.cross(vA, vB)\n normalized = np.linalg.norm(normal)\n\n normals[pIdA] += normalized\n normals[pIdB] += normalized\n normals[pIdC] += normalized\n\n return normals\n\n def on_startup(self, ext_id):\n print(\"[mnresearch.tetgen] MyExtension 
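                         # fileUrl holds the .obj path captured by the drag-and-drop handler above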
startup\")\n self._window = ui.Window(\"Tetrahedralizer\", width=300, height=300)\n with self._window.frame:\n\n self.PLC = False\n self.Quality = False\n\n with ui.VStack():\n\n MyExtension.drop_area(self, \".obj\")\n\n with ui.HStack():\n ui.Label(\"PLC\", height=0)\n plcCB = ui.CheckBox(width=20)\n plcCB.model.add_value_changed_fn(\n lambda a: MyExtension.setPLC(self, a.get_value_as_bool()))\n with ui.HStack():\n ui.Label(\"Quality\", height=0)\n qualityCB = ui.CheckBox(width=20)\n qualityCB.model.add_value_changed_fn(\n lambda a: MyExtension.setQuality(self, a.get_value_as_bool()))\n\n def on_click():\n print(\"clicked!\")\n\n self.usd_context = omni.usd.get_context()\n self.stage = self.usd_context.get_stage()\n\n if MyExtension.fileUrl != \"\":\n meshName = MyExtension.fileUrl.split(os.sep)[-1][:-4]\n prim = MyExtension.createMesh(self.usd_context, self.stage, meshName)\n points, faces = MyExtension.extractMeshDataToNP(prim)\n tet = tetgenExt.TetGen(points, faces)\n\n print('Running tetGen on: ', MyExtension.fileUrl,\n '\\nwith options:',\n 'PLC: ', self.PLC,\n '\\nQuality: ', self.Quality)\n\n node, elem, face, edge = tet.tetrahedralize(quality=True,\n plc=True,\n facesout=1,\n edgesout=1)\n normals = MyExtension.calculateNormals(node, face)\n colors = np.ones_like(normals)\n face = face.ravel()\n mesh, newPrim = MyExtension.addAttributes(self.stage,\n prim,\n node,\n elem,\n face,\n edge,\n normals,\n colors,\n meshName)\n pxr.Usd.Stage.RemovePrim(self.stage, '/World/' + meshName)\n\n ui.Button(\"Generate tetrahedral mesh\", clicked_fn=lambda: on_click())\n\n def on_shutdown(self):\n print(\"[mnresearch.tetgen] MyExtension shutdown\")\n"},"size":{"kind":"number","value":8644,"string":"8,644"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":38.47488566906445,"string":"38.474886"},"max_line_length":{"kind":"number","value":118,"string":"118"},"alphanum_fraction":{"kind":"number","value":0.5180472003102675,"string":"0.518047"}}},{"rowIdx":9466,"cells":{"file_path":{"kind":"string","value":"mnaskret/omni-tetGen/mnresearch/tetgen/__init__.py"},"content":{"kind":"string","value":"from .extension import *"},"size":{"kind":"number","value":24,"string":"24"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":23.999976000024002,"string":"23.999976"},"max_line_length":{"kind":"number","value":24,"string":"24"},"alphanum_fraction":{"kind":"number","value":0.7916666336805569,"string":"0.791667"}}},{"rowIdx":9467,"cells":{"file_path":{"kind":"string","value":"mnaskret/omni-tetGen/mnresearch/tetgen/PBDBasicGravityDatabase.py"},"content":{"kind":"string","value":"\"\"\"Support for simplified access to data on nodes of type mnresearch.tetgen.PBDBasicGravity\n\nPBDBasicGravity\n\"\"\"\n\nimport omni.graph.core as og\nimport traceback\nimport sys\nimport numpy\nclass PBDBasicGravityDatabase(og.Database):\n \"\"\"Helper class providing simplified access to data on nodes of type mnresearch.tetgen.PBDBasicGravity\n\n Class Members:\n node: Node being evaluated\n\n Attribute Value Properties:\n Inputs:\n inputs.edge\n inputs.edgesRestLengths\n inputs.elem\n inputs.gravity\n inputs.ground\n inputs.inverseMasses\n inputs.ks_distance\n inputs.ks_volume\n inputs.num_substeps\n inputs.points\n inputs.sim_constraints\n inputs.tetrahedronsRestVolumes\n inputs.velocities\n inputs.velocity_dampening\n Outputs:\n outputs.points\n outputs.velocities\n \"\"\"\n # This is an internal object that provides per-class storage of a per-node data 
dictionary\n PER_NODE_DATA = {}\n # This is an internal object that describes unchanging attributes in a generic way\n # The values in this list are in no particular order, as a per-attribute tuple\n # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, Is_Required, DefaultValue\n # You should not need to access any of this data directly, use the defined database interfaces\n INTERFACE = og.Database._get_interface([\n ('inputs:edge', 'int2[]', 0, None, 'Input edges', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:edgesRestLengths', 'float[]', 0, None, 'Input edges rest lengths', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:elem', 'int4[]', 0, None, 'Input tetrahedrons', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:gravity', 'vector3f', 0, None, 'Gravity constant', {og.MetadataKeys.DEFAULT: '[0.0, -9.8, 0.0]'}, True, [0.0, -9.8, 0.0]),\n ('inputs:ground', 'float', 0, None, 'Ground level', {og.MetadataKeys.DEFAULT: '-100.0'}, True, -100.0),\n ('inputs:inverseMasses', 'float[]', 0, None, 'Inverse masses', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:ks_distance', 'float', 0, None, '', {og.MetadataKeys.DEFAULT: '1.0'}, True, 1.0),\n ('inputs:ks_volume', 'float', 0, None, '', {og.MetadataKeys.DEFAULT: '1.0'}, True, 1.0),\n ('inputs:num_substeps', 'int', 0, None, '', {og.MetadataKeys.DEFAULT: '8'}, True, 8),\n ('inputs:points', 'point3f[]', 0, None, 'Input points', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:sim_constraints', 'int', 0, None, '', {og.MetadataKeys.DEFAULT: '1'}, True, 1),\n ('inputs:tetrahedronsRestVolumes', 'float[]', 0, None, 'Input tetrahedrons rest volumes', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:velocities', 'vector3f[]', 0, None, 'Input velocities', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:velocity_dampening', 'float', 0, None, '', {og.MetadataKeys.DEFAULT: '0.1'}, True, 0.1),\n ('outputs:points', 'point3f[]', 0, None, 'Output points', {}, True, None),\n ('outputs:velocities', 'vector3f[]', 0, None, 'Output velocities', {}, True, None),\n ])\n @classmethod\n def _populate_role_data(cls):\n \"\"\"Populate a role structure with the non-default roles on this node type\"\"\"\n role_data = super()._populate_role_data()\n role_data.inputs.gravity = og.Database.ROLE_VECTOR\n role_data.inputs.points = og.Database.ROLE_POINT\n role_data.inputs.velocities = og.Database.ROLE_VECTOR\n role_data.outputs.points = og.Database.ROLE_POINT\n role_data.outputs.velocities = og.Database.ROLE_VECTOR\n return role_data\n class ValuesForInputs(og.DynamicAttributeAccess):\n \"\"\"Helper class that creates natural hierarchical access to input attributes\"\"\"\n def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):\n \"\"\"Initialize simplified access for the attribute data\"\"\"\n context = node.get_graph().get_default_graph_context()\n super().__init__(context, node, attributes, dynamic_attributes)\n\n @property\n def edge(self):\n data_view = og.AttributeValueHelper(self._attributes.edge)\n return data_view.get()\n\n @edge.setter\n def edge(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.edge)\n data_view = og.AttributeValueHelper(self._attributes.edge)\n data_view.set(value)\n self.edge_size = data_view.get_array_size()\n\n @property\n def edgesRestLengths(self):\n data_view = og.AttributeValueHelper(self._attributes.edgesRestLengths)\n return data_view.get()\n\n @edgesRestLengths.setter\n def edgesRestLengths(self, value):\n if 
self._setting_locked:\n raise og.ReadOnlyError(self._attributes.edgesRestLengths)\n data_view = og.AttributeValueHelper(self._attributes.edgesRestLengths)\n data_view.set(value)\n self.edgesRestLengths_size = data_view.get_array_size()\n\n @property\n def elem(self):\n data_view = og.AttributeValueHelper(self._attributes.elem)\n return data_view.get()\n\n @elem.setter\n def elem(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.elem)\n data_view = og.AttributeValueHelper(self._attributes.elem)\n data_view.set(value)\n self.elem_size = data_view.get_array_size()\n\n @property\n def gravity(self):\n data_view = og.AttributeValueHelper(self._attributes.gravity)\n return data_view.get()\n\n @gravity.setter\n def gravity(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.gravity)\n data_view = og.AttributeValueHelper(self._attributes.gravity)\n data_view.set(value)\n\n @property\n def ground(self):\n data_view = og.AttributeValueHelper(self._attributes.ground)\n return data_view.get()\n\n @ground.setter\n def ground(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.ground)\n data_view = og.AttributeValueHelper(self._attributes.ground)\n data_view.set(value)\n\n @property\n def inverseMasses(self):\n data_view = og.AttributeValueHelper(self._attributes.inverseMasses)\n return data_view.get()\n\n @inverseMasses.setter\n def inverseMasses(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.inverseMasses)\n data_view = og.AttributeValueHelper(self._attributes.inverseMasses)\n data_view.set(value)\n self.inverseMasses_size = data_view.get_array_size()\n\n @property\n def ks_distance(self):\n data_view = og.AttributeValueHelper(self._attributes.ks_distance)\n return data_view.get()\n\n @ks_distance.setter\n def ks_distance(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.ks_distance)\n data_view = og.AttributeValueHelper(self._attributes.ks_distance)\n data_view.set(value)\n\n @property\n def ks_volume(self):\n data_view = og.AttributeValueHelper(self._attributes.ks_volume)\n return data_view.get()\n\n @ks_volume.setter\n def ks_volume(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.ks_volume)\n data_view = og.AttributeValueHelper(self._attributes.ks_volume)\n data_view.set(value)\n\n @property\n def num_substeps(self):\n data_view = og.AttributeValueHelper(self._attributes.num_substeps)\n return data_view.get()\n\n @num_substeps.setter\n def num_substeps(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.num_substeps)\n data_view = og.AttributeValueHelper(self._attributes.num_substeps)\n data_view.set(value)\n\n @property\n def points(self):\n data_view = og.AttributeValueHelper(self._attributes.points)\n return data_view.get()\n\n @points.setter\n def points(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.points)\n data_view = og.AttributeValueHelper(self._attributes.points)\n data_view.set(value)\n self.points_size = data_view.get_array_size()\n\n @property\n def sim_constraints(self):\n data_view = og.AttributeValueHelper(self._attributes.sim_constraints)\n return data_view.get()\n\n @sim_constraints.setter\n def sim_constraints(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.sim_constraints)\n data_view = og.AttributeValueHelper(self._attributes.sim_constraints)\n data_view.set(value)\n\n @property\n 
def tetrahedronsRestVolumes(self):\n data_view = og.AttributeValueHelper(self._attributes.tetrahedronsRestVolumes)\n return data_view.get()\n\n @tetrahedronsRestVolumes.setter\n def tetrahedronsRestVolumes(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.tetrahedronsRestVolumes)\n data_view = og.AttributeValueHelper(self._attributes.tetrahedronsRestVolumes)\n data_view.set(value)\n self.tetrahedronsRestVolumes_size = data_view.get_array_size()\n\n @property\n def velocities(self):\n data_view = og.AttributeValueHelper(self._attributes.velocities)\n return data_view.get()\n\n @velocities.setter\n def velocities(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.velocities)\n data_view = og.AttributeValueHelper(self._attributes.velocities)\n data_view.set(value)\n self.velocities_size = data_view.get_array_size()\n\n @property\n def velocity_dampening(self):\n data_view = og.AttributeValueHelper(self._attributes.velocity_dampening)\n return data_view.get()\n\n @velocity_dampening.setter\n def velocity_dampening(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.velocity_dampening)\n data_view = og.AttributeValueHelper(self._attributes.velocity_dampening)\n data_view.set(value)\n class ValuesForOutputs(og.DynamicAttributeAccess):\n \"\"\"Helper class that creates natural hierarchical access to output attributes\"\"\"\n def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):\n \"\"\"Initialize simplified access for the attribute data\"\"\"\n context = node.get_graph().get_default_graph_context()\n super().__init__(context, node, attributes, dynamic_attributes)\n self.points_size = None\n self.velocities_size = None\n\n @property\n def points(self):\n data_view = og.AttributeValueHelper(self._attributes.points)\n return data_view.get(reserved_element_count = self.points_size)\n\n @points.setter\n def points(self, value):\n data_view = og.AttributeValueHelper(self._attributes.points)\n data_view.set(value)\n self.points_size = data_view.get_array_size()\n\n @property\n def velocities(self):\n data_view = og.AttributeValueHelper(self._attributes.velocities)\n return data_view.get(reserved_element_count = self.velocities_size)\n\n @velocities.setter\n def velocities(self, value):\n data_view = og.AttributeValueHelper(self._attributes.velocities)\n data_view.set(value)\n self.velocities_size = data_view.get_array_size()\n class ValuesForState(og.DynamicAttributeAccess):\n \"\"\"Helper class that creates natural hierarchical access to state attributes\"\"\"\n def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):\n \"\"\"Initialize simplified access for the attribute data\"\"\"\n context = node.get_graph().get_default_graph_context()\n super().__init__(context, node, attributes, dynamic_attributes)\n def __init__(self, node):\n super().__init__(node)\n dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT)\n self.inputs = PBDBasicGravityDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes)\n dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT)\n self.outputs = PBDBasicGravityDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes)\n dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE)\n self.state = 
PBDBasicGravityDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)\n class abi:\n \"\"\"Class defining the ABI interface for the node type\"\"\"\n @staticmethod\n def get_node_type():\n get_node_type_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'get_node_type', None)\n if callable(get_node_type_function):\n return get_node_type_function()\n return 'mnresearch.tetgen.PBDBasicGravity'\n @staticmethod\n def compute(context, node):\n db = PBDBasicGravityDatabase(node)\n try:\n db.inputs._setting_locked = True\n compute_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'compute', None)\n if callable(compute_function) and compute_function.__code__.co_argcount > 1:\n return compute_function(context, node)\n return PBDBasicGravityDatabase.NODE_TYPE_CLASS.compute(db)\n except Exception as error:\n stack_trace = \"\".join(traceback.format_tb(sys.exc_info()[2].tb_next))\n db.log_error(f'Assertion raised in compute - {error}\\n{stack_trace}', add_context=False)\n finally:\n db.inputs._setting_locked = False\n return False\n @staticmethod\n def initialize(context, node):\n PBDBasicGravityDatabase._initialize_per_node_data(node)\n\n # Set any default values the attributes have specified\n if not node._do_not_use():\n db = PBDBasicGravityDatabase(node)\n db.inputs.edge = []\n db.inputs.edgesRestLengths = []\n db.inputs.elem = []\n db.inputs.gravity = [0.0, -9.8, 0.0]\n db.inputs.ground = -100.0\n db.inputs.inverseMasses = []\n db.inputs.ks_distance = 1.0\n db.inputs.ks_volume = 1.0\n db.inputs.num_substeps = 8\n db.inputs.points = []\n db.inputs.sim_constraints = 1\n db.inputs.tetrahedronsRestVolumes = []\n db.inputs.velocities = []\n db.inputs.velocity_dampening = 0.1\n initialize_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'initialize', None)\n if callable(initialize_function):\n initialize_function(context, node)\n @staticmethod\n def release(node):\n release_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'release', None)\n if callable(release_function):\n release_function(node)\n PBDBasicGravityDatabase._release_per_node_data(node)\n @staticmethod\n def update_node_version(context, node, old_version, new_version):\n update_node_version_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'update_node_version', None)\n if callable(update_node_version_function):\n return update_node_version_function(context, node, old_version, new_version)\n return False\n @staticmethod\n def initialize_type(node_type):\n initialize_type_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'initialize_type', None)\n needs_initializing = True\n if callable(initialize_type_function):\n needs_initializing = initialize_type_function(node_type)\n if needs_initializing:\n node_type.set_metadata(og.MetadataKeys.EXTENSION, \"mnresearch.tetgen\")\n node_type.set_metadata(og.MetadataKeys.UI_NAME, \"PBDBasicGravity\")\n node_type.set_metadata(og.MetadataKeys.DESCRIPTION, \"PBDBasicGravity\")\n node_type.set_metadata(og.MetadataKeys.LANGUAGE, \"Python\")\n PBDBasicGravityDatabase.INTERFACE.add_to_node_type(node_type)\n @staticmethod\n def on_connection_type_resolve(node):\n on_connection_type_resolve_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'on_connection_type_resolve', None)\n if callable(on_connection_type_resolve_function):\n on_connection_type_resolve_function(node)\n NODE_TYPE_CLASS = None\n GENERATOR_VERSION = (1, 4, 0)\n TARGET_VERSION = (2, 29, 1)\n @staticmethod\n def register(node_type_class):\n 
PBDBasicGravityDatabase.NODE_TYPE_CLASS = node_type_class\n og.register_node_type(PBDBasicGravityDatabase.abi, 1)\n @staticmethod\n def deregister():\n og.deregister_node_type(\"mnresearch.tetgen.PBDBasicGravity\")\n"},"size":{"kind":"number","value":17984,"string":"17,984"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":46.20472428817658,"string":"46.204724"},"max_line_length":{"kind":"number","value":141,"string":"141"},"alphanum_fraction":{"kind":"number","value":0.6241103202499938,"string":"0.62411"}}},{"rowIdx":9468,"cells":{"file_path":{"kind":"string","value":"mnaskret/omni-tetGen/mnresearch/tetgen/ogn/OgnNewNodeDatabase.py"},"content":{"kind":"string","value":"\"\"\"Support for simplified access to data on nodes of type mnresearch.tetgen.PBDBasicGravity\n\nPBDGravity\n\"\"\"\n\nimport omni.graph.core as og\nimport sys\nimport traceback\nimport numpy\nclass OgnNewNodeDatabase(og.Database):\n \"\"\"Helper class providing simplified access to data on nodes of type mnresearch.tetgen.PBDBasicGravity\n\n Class Members:\n node: Node being evaluated\n\n Attribute Value Properties:\n Inputs:\n inputs.edge\n inputs.edgesRestLengths\n inputs.elem\n inputs.gravity\n inputs.ground\n inputs.inverseMasses\n inputs.ks_distance\n inputs.ks_volume\n inputs.num_substeps\n inputs.points\n inputs.sim_constraints\n inputs.tetrahedronsRestVolumes\n inputs.velocities\n inputs.velocity_dampening\n Outputs:\n outputs.points\n outputs.velocities\n \"\"\"\n # This is an internal object that provides per-class storage of a per-node data dictionary\n PER_NODE_DATA = {}\n # This is an internal object that describes unchanging attributes in a generic way\n # The values in this list are in no particular order, as a per-attribute tuple\n # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, Is_Required, DefaultValue\n # You should not need to access any of this data directly, use the defined database interfaces\n INTERFACE = og.Database._get_interface([\n ('inputs:edge', 'int2[]', 0, None, 'Input edges', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:edgesRestLengths', 'float[]', 0, None, 'Input edges rest lengths', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:elem', 'int4[]', 0, None, 'Input tetrahedrons', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:gravity', 'vector3f', 0, None, 'Gravity constant', {og.MetadataKeys.DEFAULT: '[0.0, -9.8, 0.0]'}, True, [0.0, -9.8, 0.0]),\n ('inputs:ground', 'float', 0, None, 'Ground level', {og.MetadataKeys.DEFAULT: '-100.0'}, True, -100.0),\n ('inputs:inverseMasses', 'float[]', 0, None, 'Inverse masses', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:ks_distance', 'float', 0, None, '', {og.MetadataKeys.DEFAULT: '1.0'}, True, 1.0),\n ('inputs:ks_volume', 'float', 0, None, '', {og.MetadataKeys.DEFAULT: '1.0'}, True, 1.0),\n ('inputs:num_substeps', 'int', 0, None, '', {og.MetadataKeys.DEFAULT: '8'}, True, 8),\n ('inputs:points', 'point3f[]', 0, None, 'Input points', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:sim_constraints', 'int', 0, None, '', {og.MetadataKeys.DEFAULT: '1'}, True, 1),\n ('inputs:tetrahedronsRestVolumes', 'float[]', 0, None, 'Input tetrahedrons rest volumes', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:velocities', 'vector3f[]', 0, None, 'Input velocities', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:velocity_dampening', 'float', 0, None, '', {og.MetadataKeys.DEFAULT: '0.1'}, True, 0.1),\n ('outputs:points', 'point3f[]', 0, None, 'Output points', 
{}, True, None),\n ('outputs:velocities', 'vector3f[]', 0, None, 'Output velocities', {}, True, None),\n ])\n @classmethod\n def _populate_role_data(cls):\n \"\"\"Populate a role structure with the non-default roles on this node type\"\"\"\n role_data = super()._populate_role_data()\n role_data.inputs.gravity = og.Database.ROLE_VECTOR\n role_data.inputs.points = og.Database.ROLE_POINT\n role_data.inputs.velocities = og.Database.ROLE_VECTOR\n role_data.outputs.points = og.Database.ROLE_POINT\n role_data.outputs.velocities = og.Database.ROLE_VECTOR\n return role_data\n class ValuesForInputs(og.DynamicAttributeAccess):\n \"\"\"Helper class that creates natural hierarchical access to input attributes\"\"\"\n def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):\n \"\"\"Initialize simplified access for the attribute data\"\"\"\n context = node.get_graph().get_default_graph_context()\n super().__init__(context, node, attributes, dynamic_attributes)\n\n @property\n def edge(self):\n data_view = og.AttributeValueHelper(self._attributes.edge)\n return data_view.get()\n\n @edge.setter\n def edge(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.edge)\n data_view = og.AttributeValueHelper(self._attributes.edge)\n data_view.set(value)\n self.edge_size = data_view.get_array_size()\n\n @property\n def edgesRestLengths(self):\n data_view = og.AttributeValueHelper(self._attributes.edgesRestLengths)\n return data_view.get()\n\n @edgesRestLengths.setter\n def edgesRestLengths(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.edgesRestLengths)\n data_view = og.AttributeValueHelper(self._attributes.edgesRestLengths)\n data_view.set(value)\n self.edgesRestLengths_size = data_view.get_array_size()\n\n @property\n def elem(self):\n data_view = og.AttributeValueHelper(self._attributes.elem)\n return data_view.get()\n\n @elem.setter\n def elem(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.elem)\n data_view = og.AttributeValueHelper(self._attributes.elem)\n data_view.set(value)\n self.elem_size = data_view.get_array_size()\n\n @property\n def gravity(self):\n data_view = og.AttributeValueHelper(self._attributes.gravity)\n return data_view.get()\n\n @gravity.setter\n def gravity(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.gravity)\n data_view = og.AttributeValueHelper(self._attributes.gravity)\n data_view.set(value)\n\n @property\n def ground(self):\n data_view = og.AttributeValueHelper(self._attributes.ground)\n return data_view.get()\n\n @ground.setter\n def ground(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.ground)\n data_view = og.AttributeValueHelper(self._attributes.ground)\n data_view.set(value)\n\n @property\n def inverseMasses(self):\n data_view = og.AttributeValueHelper(self._attributes.inverseMasses)\n return data_view.get()\n\n @inverseMasses.setter\n def inverseMasses(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.inverseMasses)\n data_view = og.AttributeValueHelper(self._attributes.inverseMasses)\n data_view.set(value)\n self.inverseMasses_size = data_view.get_array_size()\n\n @property\n def ks_distance(self):\n data_view = og.AttributeValueHelper(self._attributes.ks_distance)\n return data_view.get()\n\n @ks_distance.setter\n def ks_distance(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.ks_distance)\n 
data_view = og.AttributeValueHelper(self._attributes.ks_distance)\n data_view.set(value)\n\n @property\n def ks_volume(self):\n data_view = og.AttributeValueHelper(self._attributes.ks_volume)\n return data_view.get()\n\n @ks_volume.setter\n def ks_volume(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.ks_volume)\n data_view = og.AttributeValueHelper(self._attributes.ks_volume)\n data_view.set(value)\n\n @property\n def num_substeps(self):\n data_view = og.AttributeValueHelper(self._attributes.num_substeps)\n return data_view.get()\n\n @num_substeps.setter\n def num_substeps(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.num_substeps)\n data_view = og.AttributeValueHelper(self._attributes.num_substeps)\n data_view.set(value)\n\n @property\n def points(self):\n data_view = og.AttributeValueHelper(self._attributes.points)\n return data_view.get()\n\n @points.setter\n def points(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.points)\n data_view = og.AttributeValueHelper(self._attributes.points)\n data_view.set(value)\n self.points_size = data_view.get_array_size()\n\n @property\n def sim_constraints(self):\n data_view = og.AttributeValueHelper(self._attributes.sim_constraints)\n return data_view.get()\n\n @sim_constraints.setter\n def sim_constraints(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.sim_constraints)\n data_view = og.AttributeValueHelper(self._attributes.sim_constraints)\n data_view.set(value)\n\n @property\n def tetrahedronsRestVolumes(self):\n data_view = og.AttributeValueHelper(self._attributes.tetrahedronsRestVolumes)\n return data_view.get()\n\n @tetrahedronsRestVolumes.setter\n def tetrahedronsRestVolumes(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.tetrahedronsRestVolumes)\n data_view = og.AttributeValueHelper(self._attributes.tetrahedronsRestVolumes)\n data_view.set(value)\n self.tetrahedronsRestVolumes_size = data_view.get_array_size()\n\n @property\n def velocities(self):\n data_view = og.AttributeValueHelper(self._attributes.velocities)\n return data_view.get()\n\n @velocities.setter\n def velocities(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.velocities)\n data_view = og.AttributeValueHelper(self._attributes.velocities)\n data_view.set(value)\n self.velocities_size = data_view.get_array_size()\n\n @property\n def velocity_dampening(self):\n data_view = og.AttributeValueHelper(self._attributes.velocity_dampening)\n return data_view.get()\n\n @velocity_dampening.setter\n def velocity_dampening(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.velocity_dampening)\n data_view = og.AttributeValueHelper(self._attributes.velocity_dampening)\n data_view.set(value)\n class ValuesForOutputs(og.DynamicAttributeAccess):\n \"\"\"Helper class that creates natural hierarchical access to output attributes\"\"\"\n def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):\n \"\"\"Initialize simplified access for the attribute data\"\"\"\n context = node.get_graph().get_default_graph_context()\n super().__init__(context, node, attributes, dynamic_attributes)\n self.points_size = None\n self.velocities_size = None\n\n @property\n def points(self):\n data_view = og.AttributeValueHelper(self._attributes.points)\n return data_view.get(reserved_element_count = self.points_size)\n\n @points.setter\n def 
points(self, value):\n data_view = og.AttributeValueHelper(self._attributes.points)\n data_view.set(value)\n self.points_size = data_view.get_array_size()\n\n @property\n def velocities(self):\n data_view = og.AttributeValueHelper(self._attributes.velocities)\n return data_view.get(reserved_element_count = self.velocities_size)\n\n @velocities.setter\n def velocities(self, value):\n data_view = og.AttributeValueHelper(self._attributes.velocities)\n data_view.set(value)\n self.velocities_size = data_view.get_array_size()\n class ValuesForState(og.DynamicAttributeAccess):\n \"\"\"Helper class that creates natural hierarchical access to state attributes\"\"\"\n def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):\n \"\"\"Initialize simplified access for the attribute data\"\"\"\n context = node.get_graph().get_default_graph_context()\n super().__init__(context, node, attributes, dynamic_attributes)\n def __init__(self, node):\n super().__init__(node)\n dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT)\n self.inputs = OgnNewNodeDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes)\n dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT)\n self.outputs = OgnNewNodeDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes)\n dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE)\n self.state = OgnNewNodeDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)\n class abi:\n \"\"\"Class defining the ABI interface for the node type\"\"\"\n @staticmethod\n def get_node_type():\n get_node_type_function = getattr(OgnNewNodeDatabase.NODE_TYPE_CLASS, 'get_node_type', None)\n if callable(get_node_type_function):\n return get_node_type_function()\n return 'mnresearch.tetgen.PBDBasicGravity'\n @staticmethod\n def compute(context, node):\n db = OgnNewNodeDatabase(node)\n try:\n db.inputs._setting_locked = True\n compute_function = getattr(OgnNewNodeDatabase.NODE_TYPE_CLASS, 'compute', None)\n if callable(compute_function) and compute_function.__code__.co_argcount > 1:\n return compute_function(context, node)\n return OgnNewNodeDatabase.NODE_TYPE_CLASS.compute(db)\n except Exception as error:\n stack_trace = \"\".join(traceback.format_tb(sys.exc_info()[2].tb_next))\n db.log_error(f'Assertion raised in compute - {error}\\n{stack_trace}', add_context=False)\n finally:\n db.inputs._setting_locked = False\n return False\n @staticmethod\n def initialize(context, node):\n OgnNewNodeDatabase._initialize_per_node_data(node)\n\n # Set any default values the attributes have specified\n if not node._do_not_use():\n db = OgnNewNodeDatabase(node)\n db.inputs.edge = []\n db.inputs.edgesRestLengths = []\n db.inputs.elem = []\n db.inputs.gravity = [0.0, -9.8, 0.0]\n db.inputs.ground = -100.0\n db.inputs.inverseMasses = []\n db.inputs.ks_distance = 1.0\n db.inputs.ks_volume = 1.0\n db.inputs.num_substeps = 8\n db.inputs.points = []\n db.inputs.sim_constraints = 1\n db.inputs.tetrahedronsRestVolumes = []\n db.inputs.velocities = []\n db.inputs.velocity_dampening = 0.1\n initialize_function = getattr(OgnNewNodeDatabase.NODE_TYPE_CLASS, 'initialize', None)\n if callable(initialize_function):\n initialize_function(context, node)\n @staticmethod\n def release(node):\n release_function = getattr(OgnNewNodeDatabase.NODE_TYPE_CLASS, 'release', None)\n if callable(release_function):\n 
release_function(node)\n OgnNewNodeDatabase._release_per_node_data(node)\n @staticmethod\n def update_node_version(context, node, old_version, new_version):\n update_node_version_function = getattr(OgnNewNodeDatabase.NODE_TYPE_CLASS, 'update_node_version', None)\n if callable(update_node_version_function):\n return update_node_version_function(context, node, old_version, new_version)\n return False\n @staticmethod\n def initialize_type(node_type):\n initialize_type_function = getattr(OgnNewNodeDatabase.NODE_TYPE_CLASS, 'initialize_type', None)\n needs_initializing = True\n if callable(initialize_type_function):\n needs_initializing = initialize_type_function(node_type)\n if needs_initializing:\n node_type.set_metadata(og.MetadataKeys.EXTENSION, \"mnaskret.pbdgravity\")\n node_type.set_metadata(og.MetadataKeys.UI_NAME, \"PBDGravity\")\n node_type.set_metadata(og.MetadataKeys.DESCRIPTION, \"PBDGravity\")\n node_type.set_metadata(og.MetadataKeys.LANGUAGE, \"Python\")\n OgnNewNodeDatabase.INTERFACE.add_to_node_type(node_type)\n @staticmethod\n def on_connection_type_resolve(node):\n on_connection_type_resolve_function = getattr(OgnNewNodeDatabase.NODE_TYPE_CLASS, 'on_connection_type_resolve', None)\n if callable(on_connection_type_resolve_function):\n on_connection_type_resolve_function(node)\n NODE_TYPE_CLASS = None\n GENERATOR_VERSION = (1, 4, 0)\n TARGET_VERSION = (2, 29, 1)\n @staticmethod\n def register(node_type_class):\n OgnNewNodeDatabase.NODE_TYPE_CLASS = node_type_class\n og.register_node_type(OgnNewNodeDatabase.abi, 1)\n @staticmethod\n def deregister():\n og.deregister_node_type(\"mnaskret.pbdgravity.PBDGravity\")\n"},"size":{"kind":"number","value":17873,"string":"17,873"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":45.91338570626408,"string":"45.913386"},"max_line_length":{"kind":"number","value":141,"string":"141"},"alphanum_fraction":{"kind":"number","value":0.6217758629988376,"string":"0.621776"}}},{"rowIdx":9469,"cells":{"file_path":{"kind":"string","value":"mnaskret/omni-tetGen/mnresearch/tetgen/ogn/PBDBasicGravityDatabase.py"},"content":{"kind":"string","value":"\"\"\"Support for simplified access to data on nodes of type mnresearch.tetgen.PBDBasicGravity\n\nPBDBasicGravity\n\"\"\"\n\nimport omni.graph.core as og\nimport traceback\nimport sys\nimport numpy\nclass PBDBasicGravityDatabase(og.Database):\n \"\"\"Helper class providing simplified access to data on nodes of type mnresearch.tetgen.PBDBasicGravity\n\n Class Members:\n node: Node being evaluated\n\n Attribute Value Properties:\n Inputs:\n inputs.edge\n inputs.edgesRestLengths\n inputs.elem\n inputs.gravity\n inputs.ground\n inputs.inverseMasses\n inputs.ks_distance\n inputs.ks_volume\n inputs.num_substeps\n inputs.points\n inputs.sim_constraints\n inputs.tetrahedronsRestVolumes\n inputs.velocities\n inputs.velocity_dampening\n Outputs:\n outputs.points\n outputs.velocities\n \"\"\"\n # This is an internal object that provides per-class storage of a per-node data dictionary\n PER_NODE_DATA = {}\n # This is an internal object that describes unchanging attributes in a generic way\n # The values in this list are in no particular order, as a per-attribute tuple\n # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, Is_Required, DefaultValue\n # You should not need to access any of this data directly, use the defined database interfaces\n INTERFACE = og.Database._get_interface([\n ('inputs:edge', 'int2[]', 0, None, 'Input edges', {og.MetadataKeys.DEFAULT: '[]'}, True, 
[]),\n ('inputs:edgesRestLengths', 'float[]', 0, None, 'Input edges rest lengths', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:elem', 'int4[]', 0, None, 'Input tetrahedrons', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:gravity', 'vector3f', 0, None, 'Gravity constant', {og.MetadataKeys.DEFAULT: '[0.0, -9.8, 0.0]'}, True, [0.0, -9.8, 0.0]),\n ('inputs:ground', 'float', 0, None, 'Ground level', {og.MetadataKeys.DEFAULT: '-100.0'}, True, -100.0),\n ('inputs:inverseMasses', 'float[]', 0, None, 'Inverse masses', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:ks_distance', 'float', 0, None, '', {og.MetadataKeys.DEFAULT: '1.0'}, True, 1.0),\n ('inputs:ks_volume', 'float', 0, None, '', {og.MetadataKeys.DEFAULT: '1.0'}, True, 1.0),\n ('inputs:num_substeps', 'int', 0, None, '', {og.MetadataKeys.DEFAULT: '8'}, True, 8),\n ('inputs:points', 'point3f[]', 0, None, 'Input points', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:sim_constraints', 'int', 0, None, '', {og.MetadataKeys.DEFAULT: '1'}, True, 1),\n ('inputs:tetrahedronsRestVolumes', 'float[]', 0, None, 'Input tetrahedrons rest volumes', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:velocities', 'vector3f[]', 0, None, 'Input velocities', {og.MetadataKeys.DEFAULT: '[]'}, True, []),\n ('inputs:velocity_dampening', 'float', 0, None, '', {og.MetadataKeys.DEFAULT: '0.1'}, True, 0.1),\n ('outputs:points', 'point3f[]', 0, None, 'Output points', {}, True, None),\n ('outputs:velocities', 'vector3f[]', 0, None, 'Output velocities', {}, True, None),\n ])\n @classmethod\n def _populate_role_data(cls):\n \"\"\"Populate a role structure with the non-default roles on this node type\"\"\"\n role_data = super()._populate_role_data()\n role_data.inputs.gravity = og.Database.ROLE_VECTOR\n role_data.inputs.points = og.Database.ROLE_POINT\n role_data.inputs.velocities = og.Database.ROLE_VECTOR\n role_data.outputs.points = og.Database.ROLE_POINT\n role_data.outputs.velocities = og.Database.ROLE_VECTOR\n return role_data\n class ValuesForInputs(og.DynamicAttributeAccess):\n \"\"\"Helper class that creates natural hierarchical access to input attributes\"\"\"\n def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):\n \"\"\"Initialize simplified access for the attribute data\"\"\"\n context = node.get_graph().get_default_graph_context()\n super().__init__(context, node, attributes, dynamic_attributes)\n\n @property\n def edge(self):\n data_view = og.AttributeValueHelper(self._attributes.edge)\n return data_view.get()\n\n @edge.setter\n def edge(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.edge)\n data_view = og.AttributeValueHelper(self._attributes.edge)\n data_view.set(value)\n self.edge_size = data_view.get_array_size()\n\n @property\n def edgesRestLengths(self):\n data_view = og.AttributeValueHelper(self._attributes.edgesRestLengths)\n return data_view.get()\n\n @edgesRestLengths.setter\n def edgesRestLengths(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.edgesRestLengths)\n data_view = og.AttributeValueHelper(self._attributes.edgesRestLengths)\n data_view.set(value)\n self.edgesRestLengths_size = data_view.get_array_size()\n\n @property\n def elem(self):\n data_view = og.AttributeValueHelper(self._attributes.elem)\n return data_view.get()\n\n @elem.setter\n def elem(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.elem)\n data_view = 
og.AttributeValueHelper(self._attributes.elem)\n data_view.set(value)\n self.elem_size = data_view.get_array_size()\n\n @property\n def gravity(self):\n data_view = og.AttributeValueHelper(self._attributes.gravity)\n return data_view.get()\n\n @gravity.setter\n def gravity(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.gravity)\n data_view = og.AttributeValueHelper(self._attributes.gravity)\n data_view.set(value)\n\n @property\n def ground(self):\n data_view = og.AttributeValueHelper(self._attributes.ground)\n return data_view.get()\n\n @ground.setter\n def ground(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.ground)\n data_view = og.AttributeValueHelper(self._attributes.ground)\n data_view.set(value)\n\n @property\n def inverseMasses(self):\n data_view = og.AttributeValueHelper(self._attributes.inverseMasses)\n return data_view.get()\n\n @inverseMasses.setter\n def inverseMasses(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.inverseMasses)\n data_view = og.AttributeValueHelper(self._attributes.inverseMasses)\n data_view.set(value)\n self.inverseMasses_size = data_view.get_array_size()\n\n @property\n def ks_distance(self):\n data_view = og.AttributeValueHelper(self._attributes.ks_distance)\n return data_view.get()\n\n @ks_distance.setter\n def ks_distance(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.ks_distance)\n data_view = og.AttributeValueHelper(self._attributes.ks_distance)\n data_view.set(value)\n\n @property\n def ks_volume(self):\n data_view = og.AttributeValueHelper(self._attributes.ks_volume)\n return data_view.get()\n\n @ks_volume.setter\n def ks_volume(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.ks_volume)\n data_view = og.AttributeValueHelper(self._attributes.ks_volume)\n data_view.set(value)\n\n @property\n def num_substeps(self):\n data_view = og.AttributeValueHelper(self._attributes.num_substeps)\n return data_view.get()\n\n @num_substeps.setter\n def num_substeps(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.num_substeps)\n data_view = og.AttributeValueHelper(self._attributes.num_substeps)\n data_view.set(value)\n\n @property\n def points(self):\n data_view = og.AttributeValueHelper(self._attributes.points)\n return data_view.get()\n\n @points.setter\n def points(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.points)\n data_view = og.AttributeValueHelper(self._attributes.points)\n data_view.set(value)\n self.points_size = data_view.get_array_size()\n\n @property\n def sim_constraints(self):\n data_view = og.AttributeValueHelper(self._attributes.sim_constraints)\n return data_view.get()\n\n @sim_constraints.setter\n def sim_constraints(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.sim_constraints)\n data_view = og.AttributeValueHelper(self._attributes.sim_constraints)\n data_view.set(value)\n\n @property\n def tetrahedronsRestVolumes(self):\n data_view = og.AttributeValueHelper(self._attributes.tetrahedronsRestVolumes)\n return data_view.get()\n\n @tetrahedronsRestVolumes.setter\n def tetrahedronsRestVolumes(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.tetrahedronsRestVolumes)\n data_view = og.AttributeValueHelper(self._attributes.tetrahedronsRestVolumes)\n data_view.set(value)\n self.tetrahedronsRestVolumes_size = data_view.get_array_size()\n\n 
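        # Usage note (illustrative sketch, not generated code): a node's compute(db) reads
        # these accessors as numpy-compatible arrays and writes results to db.outputs, e.g.
        # mirroring how nodes/PBDBasicGravity.py consumes this database:
        #     rest_volumes = db.inputs.tetrahedronsRestVolumes   # float[] input
        #     db.outputs.points = new_positions                  # point3f[] output
        # Assigning to db.inputs from inside compute raises og.ReadOnlyError, because the
        # ABI compute wrapper sets _setting_locked for the duration of the call.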
@property\n def velocities(self):\n data_view = og.AttributeValueHelper(self._attributes.velocities)\n return data_view.get()\n\n @velocities.setter\n def velocities(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.velocities)\n data_view = og.AttributeValueHelper(self._attributes.velocities)\n data_view.set(value)\n self.velocities_size = data_view.get_array_size()\n\n @property\n def velocity_dampening(self):\n data_view = og.AttributeValueHelper(self._attributes.velocity_dampening)\n return data_view.get()\n\n @velocity_dampening.setter\n def velocity_dampening(self, value):\n if self._setting_locked:\n raise og.ReadOnlyError(self._attributes.velocity_dampening)\n data_view = og.AttributeValueHelper(self._attributes.velocity_dampening)\n data_view.set(value)\n class ValuesForOutputs(og.DynamicAttributeAccess):\n \"\"\"Helper class that creates natural hierarchical access to output attributes\"\"\"\n def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):\n \"\"\"Initialize simplified access for the attribute data\"\"\"\n context = node.get_graph().get_default_graph_context()\n super().__init__(context, node, attributes, dynamic_attributes)\n self.points_size = None\n self.velocities_size = None\n\n @property\n def points(self):\n data_view = og.AttributeValueHelper(self._attributes.points)\n return data_view.get(reserved_element_count = self.points_size)\n\n @points.setter\n def points(self, value):\n data_view = og.AttributeValueHelper(self._attributes.points)\n data_view.set(value)\n self.points_size = data_view.get_array_size()\n\n @property\n def velocities(self):\n data_view = og.AttributeValueHelper(self._attributes.velocities)\n return data_view.get(reserved_element_count = self.velocities_size)\n\n @velocities.setter\n def velocities(self, value):\n data_view = og.AttributeValueHelper(self._attributes.velocities)\n data_view.set(value)\n self.velocities_size = data_view.get_array_size()\n class ValuesForState(og.DynamicAttributeAccess):\n \"\"\"Helper class that creates natural hierarchical access to state attributes\"\"\"\n def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):\n \"\"\"Initialize simplified access for the attribute data\"\"\"\n context = node.get_graph().get_default_graph_context()\n super().__init__(context, node, attributes, dynamic_attributes)\n def __init__(self, node):\n super().__init__(node)\n dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT)\n self.inputs = PBDBasicGravityDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes)\n dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT)\n self.outputs = PBDBasicGravityDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes)\n dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE)\n self.state = PBDBasicGravityDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)\n class abi:\n \"\"\"Class defining the ABI interface for the node type\"\"\"\n @staticmethod\n def get_node_type():\n get_node_type_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'get_node_type', None)\n if callable(get_node_type_function):\n return get_node_type_function()\n return 'mnresearch.tetgen.PBDBasicGravity'\n @staticmethod\n def compute(context, node):\n db = PBDBasicGravityDatabase(node)\n try:\n 
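                # Lock the inputs so user node code cannot mutate them during compute;
                # the flag is cleared again in the finally block below.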
db.inputs._setting_locked = True\n compute_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'compute', None)\n if callable(compute_function) and compute_function.__code__.co_argcount > 1:\n return compute_function(context, node)\n return PBDBasicGravityDatabase.NODE_TYPE_CLASS.compute(db)\n except Exception as error:\n stack_trace = \"\".join(traceback.format_tb(sys.exc_info()[2].tb_next))\n db.log_error(f'Assertion raised in compute - {error}\\n{stack_trace}', add_context=False)\n finally:\n db.inputs._setting_locked = False\n return False\n @staticmethod\n def initialize(context, node):\n PBDBasicGravityDatabase._initialize_per_node_data(node)\n\n # Set any default values the attributes have specified\n if not node._do_not_use():\n db = PBDBasicGravityDatabase(node)\n db.inputs.edge = []\n db.inputs.edgesRestLengths = []\n db.inputs.elem = []\n db.inputs.gravity = [0.0, -9.8, 0.0]\n db.inputs.ground = -100.0\n db.inputs.inverseMasses = []\n db.inputs.ks_distance = 1.0\n db.inputs.ks_volume = 1.0\n db.inputs.num_substeps = 8\n db.inputs.points = []\n db.inputs.sim_constraints = 1\n db.inputs.tetrahedronsRestVolumes = []\n db.inputs.velocities = []\n db.inputs.velocity_dampening = 0.1\n initialize_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'initialize', None)\n if callable(initialize_function):\n initialize_function(context, node)\n @staticmethod\n def release(node):\n release_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'release', None)\n if callable(release_function):\n release_function(node)\n PBDBasicGravityDatabase._release_per_node_data(node)\n @staticmethod\n def update_node_version(context, node, old_version, new_version):\n update_node_version_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'update_node_version', None)\n if callable(update_node_version_function):\n return update_node_version_function(context, node, old_version, new_version)\n return False\n @staticmethod\n def initialize_type(node_type):\n initialize_type_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'initialize_type', None)\n needs_initializing = True\n if callable(initialize_type_function):\n needs_initializing = initialize_type_function(node_type)\n if needs_initializing:\n node_type.set_metadata(og.MetadataKeys.EXTENSION, \"mnresearch.tetgen\")\n node_type.set_metadata(og.MetadataKeys.UI_NAME, \"PBDBasicGravity\")\n node_type.set_metadata(og.MetadataKeys.DESCRIPTION, \"PBDBasicGravity\")\n node_type.set_metadata(og.MetadataKeys.LANGUAGE, \"Python\")\n PBDBasicGravityDatabase.INTERFACE.add_to_node_type(node_type)\n @staticmethod\n def on_connection_type_resolve(node):\n on_connection_type_resolve_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'on_connection_type_resolve', None)\n if callable(on_connection_type_resolve_function):\n on_connection_type_resolve_function(node)\n NODE_TYPE_CLASS = None\n GENERATOR_VERSION = (1, 4, 0)\n TARGET_VERSION = (2, 29, 1)\n @staticmethod\n def register(node_type_class):\n PBDBasicGravityDatabase.NODE_TYPE_CLASS = node_type_class\n og.register_node_type(PBDBasicGravityDatabase.abi, 1)\n @staticmethod\n def deregister():\n 
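        # Removes this node type from the registry, typically on extension shutdown.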
og.deregister_node_type(\"mnresearch.tetgen.PBDBasicGravity\")\n"},"size":{"kind":"number","value":17984,"string":"17,984"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":46.20472428817658,"string":"46.204724"},"max_line_length":{"kind":"number","value":141,"string":"141"},"alphanum_fraction":{"kind":"number","value":0.6241103202499938,"string":"0.62411"}}},{"rowIdx":9470,"cells":{"file_path":{"kind":"string","value":"mnaskret/omni-tetGen/mnresearch/tetgen/ogn/nodes/PBDBasicGravity.py"},"content":{"kind":"string","value":"\"\"\"\nThis is the implementation of the OGN node defined in OgnNewNode.ogn\n\"\"\"\n\n# Array or tuple values are accessed as numpy arrays so you probably need this import\nimport math\n\nimport numpy as np\nimport warp as wp\n\nimport omni.timeline\n\nfrom pxr import Usd, UsdGeom, Gf, Sdf\n\n@wp.kernel\ndef boundsKer(predictedPositions: wp.array(dtype=wp.vec3),\n groundLevel: float):\n \n tid = wp.tid()\n\n x = predictedPositions[tid]\n\n if(x[1] < groundLevel):\n predictedPositions[tid] = wp.vec3(x[0], groundLevel, x[2])\n\n@wp.kernel\ndef PBDStepKer(positions: wp.array(dtype=wp.vec3),\n predictedPositions: wp.array(dtype=wp.vec3),\n velocities: wp.array(dtype=wp.vec3),\n dT: float):\n \n tid = wp.tid()\n\n x = positions[tid]\n xPred = predictedPositions[tid]\n\n v = (xPred - x)*(1.0/dT)\n x = xPred\n\n positions[tid] = x\n velocities[tid] = v\n\n@wp.kernel\ndef gravityKer(positions: wp.array(dtype=wp.vec3),\n predictedPositions: wp.array(dtype=wp.vec3),\n velocities: wp.array(dtype=wp.vec3),\n gravityConstant: wp.vec3,\n velocityDampening: float,\n dt: float):\n \n tid = wp.tid()\n\n x = positions[tid]\n v = velocities[tid]\n\n velocityDampening = 1.0 - velocityDampening\n\n v = v + gravityConstant*dt*velocityDampening\n\n xPred = x + v*dt\n\n predictedPositions[tid] = xPred\n\n@wp.kernel\ndef distanceConstraints(predictedPositions: wp.array(dtype=wp.vec3),\n dP: wp.array(dtype=wp.vec3),\n constraintsNumber: wp.array(dtype=int),\n edgesA: wp.array(dtype=int),\n edgesB: wp.array(dtype=int),\n edgesRestLengths: wp.array(dtype=float),\n inverseMasses: wp.array(dtype=float),\n kS: float):\n \n tid = wp.tid()\n\n edgeIndexA = edgesA[tid]\n edgeIndexB = edgesB[tid]\n\n edgePositionA = predictedPositions[edgeIndexA]\n edgePositionB = predictedPositions[edgeIndexB]\n \n edgeRestLength = edgesRestLengths[tid]\n\n dir = edgePositionA - edgePositionB\n len = wp.length(dir)\n\n inverseMass = inverseMasses[edgeIndexA] + inverseMasses[edgeIndexB]\n\n edgeDP = (len-edgeRestLength) * wp.normalize(dir) * kS / inverseMass\n \n wp.atomic_sub(dP, edgeIndexA, edgeDP)\n wp.atomic_add(dP, edgeIndexB, edgeDP)\n\n wp.atomic_add(constraintsNumber, edgeIndexA, 1)\n wp.atomic_add(constraintsNumber, edgeIndexB, 1)\n\n@wp.kernel\ndef volumeConstraints(predictedPositions: wp.array(dtype=wp.vec3),\n dP: wp.array(dtype=wp.vec3),\n constraintsNumber: wp.array(dtype=int),\n tetrahedronsA: wp.array(dtype=int),\n tetrahedronsB: wp.array(dtype=int),\n tetrahedronsC: wp.array(dtype=int),\n tetrahedronsD: wp.array(dtype=int),\n tetrahedronsRestVolumes: wp.array(dtype=float),\n inverseMasses: wp.array(dtype=float),\n kS: float):\n \n tid = wp.tid()\n\n tetrahedronIndexA = tetrahedronsA[tid]\n tetrahedronIndexB = tetrahedronsB[tid]\n tetrahedronIndexC = tetrahedronsC[tid]\n tetrahedronIndexD = tetrahedronsD[tid]\n\n tetrahedronPositionA = predictedPositions[tetrahedronIndexA]\n tetrahedronPositionB = predictedPositions[tetrahedronIndexB]\n tetrahedronPositionC = 
predictedPositions[tetrahedronIndexC]\n tetrahedronPositionD = predictedPositions[tetrahedronIndexD]\n \n tetrahedronRestVolume = tetrahedronsRestVolumes[tid]\n\n p1 = tetrahedronPositionB - tetrahedronPositionA\n p2 = tetrahedronPositionC - tetrahedronPositionA\n p3 = tetrahedronPositionD - tetrahedronPositionA\n\n q2 = wp.cross(p3, p1)\n q1 = wp.cross(p2, p3)\n q3 = wp.cross(p1, p2)\n q0 = - q1 - q2 - q3\n\n mA = inverseMasses[tetrahedronIndexA]\n mB = inverseMasses[tetrahedronIndexB]\n mC = inverseMasses[tetrahedronIndexC]\n mD = inverseMasses[tetrahedronIndexD]\n\n volume = wp.dot(wp.cross(p1, p2), p3) / 6.0\n\n lambd = mA * wp.dot(q0, q0) + mB * wp.dot(q1, q1) + mC * wp.dot(q2, q2) + mD * wp.dot(q3, q3)\n\n lambd = kS * (volume - tetrahedronRestVolume) / lambd\n\n wp.atomic_sub(dP, tetrahedronIndexA, q0 * lambd * mA)\n wp.atomic_sub(dP, tetrahedronIndexB, q1 * lambd * mB)\n wp.atomic_sub(dP, tetrahedronIndexC, q2 * lambd * mC)\n wp.atomic_sub(dP, tetrahedronIndexD, q3 * lambd * mD)\n\n wp.atomic_add(constraintsNumber, tetrahedronIndexA, 1)\n wp.atomic_add(constraintsNumber, tetrahedronIndexB, 1)\n wp.atomic_add(constraintsNumber, tetrahedronIndexC, 1)\n wp.atomic_add(constraintsNumber, tetrahedronIndexD, 1)\n\n@wp.kernel\ndef applyConstraints(predictedPositions: wp.array(dtype=wp.vec3),\n dP: wp.array(dtype=wp.vec3),\n constraintsNumber: wp.array(dtype=int)):\n \n tid = wp.tid()\n\n if(constraintsNumber[tid] > 0):\n tmpDP = dP[tid]\n N = float(constraintsNumber[tid])\n DP = wp.vec3(tmpDP[0]/N, tmpDP[1]/N, tmpDP[2]/N)\n predictedPositions[tid] = predictedPositions[tid] + DP\n\n dP[tid] = wp.vec3(0.0, 0.0, 0.0)\n constraintsNumber[tid] = 0\n\nclass PBDBasicGravity:\n\n @staticmethod\n def compute(db) -> bool:\n\n timeline = omni.timeline.get_timeline_interface()\n device = \"cuda\"\n\n # # reset on stop\n # if (timeline.is_stopped()):\n # context.reset()\n\n # initialization\n if (timeline.is_playing()):\n\n with wp.ScopedCudaGuard():\n \n gravity = db.inputs.gravity\n velocity_dampening = db.inputs.velocity_dampening\n ground = db.inputs.ground\n kSDistance = db.inputs.ks_distance\n kSVolume = db.inputs.ks_volume\n\n # convert node inputs to a GPU array\n positions = wp.array(db.inputs.points, dtype=wp.vec3, device=device)\n predictedPositions = wp.zeros_like(positions)\n velocities = wp.array(db.inputs.velocities, dtype=wp.vec3, device=device)\n inverseMasses = wp.array(db.inputs.inverseMasses, dtype=float, device=device)\n\n dP = wp.zeros_like(positions)\n constraintsNumber = wp.zeros(len(dP), dtype=int, device=device)\n\n edgesSplit = np.hsplit(db.inputs.edge, 2)\n edgesA = wp.array(edgesSplit[0], dtype=int, device=device)\n edgesB = wp.array(edgesSplit[1], dtype=int, device=device)\n edgesRestLengths = wp.array(db.inputs.edgesRestLengths, dtype=float, device=device)\n\n tetrahedronsSplit = np.hsplit(db.inputs.elem, 4)\n tetrahedronsA = wp.array(tetrahedronsSplit[0], dtype=int, device=device)\n tetrahedronsB = wp.array(tetrahedronsSplit[1], dtype=int, device=device)\n tetrahedronsC = wp.array(tetrahedronsSplit[2], dtype=int, device=device)\n tetrahedronsD = wp.array(tetrahedronsSplit[3], dtype=int, device=device)\n tetrahedronsRestVolumes = wp.array(db.inputs.tetrahedronsRestVolumes, dtype=float, device=device)\n\n # step simulation\n with wp.ScopedTimer(\"Simulate\", active=False):\n # simulate\n sim_substeps = db.inputs.num_substeps\n sim_constraints = db.inputs.sim_constraints\n sim_dt = (1.0/30)/sim_substeps\n\n for i in range(sim_substeps):\n\n # simulate\n 
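                        # Each substep follows the standard position-based dynamics loop:
                        #   1. gravityKer: integrate velocity and predict positions
                        #   2. repeated sim_constraints times: volumeConstraints and
                        #      distanceConstraints accumulate corrections into dP, then
                        #      applyConstraints averages them per particle
                        #   3. boundsKer: clamp predicted positions to the ground plane
                        #   4. PBDStepKer: set velocity = (predicted - old) / dt and
                        #      commit the predicted positions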
wp.launch(kernel=gravityKer,\n dim=len(positions),\n inputs=[positions,\n predictedPositions,\n velocities,\n gravity,\n velocity_dampening,\n sim_dt],\n device=device)\n\n for j in range(sim_constraints):\n\n wp.launch(\n kernel=volumeConstraints,\n dim=len(tetrahedronsA),\n inputs=[predictedPositions,\n dP,\n constraintsNumber,\n tetrahedronsA,\n tetrahedronsB,\n tetrahedronsC,\n tetrahedronsD,\n tetrahedronsRestVolumes,\n inverseMasses,\n kSVolume],\n device=device)\n\n wp.launch(\n kernel=distanceConstraints,\n dim=len(edgesA),\n inputs=[predictedPositions,\n dP,\n constraintsNumber,\n edgesA,\n edgesB,\n edgesRestLengths,\n inverseMasses,\n kSDistance],\n device=device)\n\n wp.launch(\n kernel=applyConstraints,\n dim=len(positions),\n inputs=[predictedPositions,\n dP,\n constraintsNumber],\n device=device)\n\n wp.launch(kernel=boundsKer,\n dim=len(predictedPositions),\n inputs=[predictedPositions,\n ground],\n device=device)\n\n wp.launch(kernel=PBDStepKer,\n dim=len(positions),\n inputs=[positions,\n predictedPositions,\n velocities,\n sim_dt],\n device=device)\n\n # write node outputs\n db.outputs.points = positions.numpy()\n db.outputs.velocities = velocities.numpy()\n\n else:\n \n with wp.ScopedTimer(\"Write\", active=False):\n \n # timeline not playing and sim. not yet initialized, just pass through outputs\n db.outputs.points = db.inputs.points\n db.outputs.velocities = db.inputs.velocities"},"size":{"kind":"number","value":11017,"string":"11,017"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":36.349152419155416,"string":"36.349152"},"max_line_length":{"kind":"number","value":113,"string":"113"},"alphanum_fraction":{"kind":"number","value":0.5182899155379604,"string":"0.51829"}}},{"rowIdx":9471,"cells":{"file_path":{"kind":"string","value":"mnaskret/omni-tetGen/mnresearch/tetgen/ogn/tests/TestPBDBasicGravity.py"},"content":{"kind":"string","value":"import omni.kit.test\nimport omni.graph.core as og\nimport omni.graph.core.tests as ogts\nimport os\nimport carb\n\n\nclass TestOgn(ogts.test_case_class(use_schema_prims=True, allow_implicit_graph=False)):\n\n async def test_import(self):\n import mnresearch.tetgen.ogn.PBDBasicGravityDatabase\n self.assertTrue(hasattr(mnresearch.tetgen.ogn.PBDBasicGravityDatabase, \"PBDBasicGravityDatabase\"))\n\n async def test_usda(self):\n test_file_name = \"PBDBasicGravityTemplate.usda\"\n usd_path = os.path.join(os.path.dirname(__file__), \"usd\", test_file_name)\n if not os.path.exists(usd_path):\n self.assertTrue(False, f\"{usd_path} not found for loading test\")\n (result, error) = await ogts.load_test_file(usd_path)\n self.assertTrue(result, f'{error} on {usd_path}')\n test_node = og.Controller.node(\"/TestGraph/Template_mnresearch_tetgen_PBDBasicGravity\")\n self.assertTrue(test_node.is_valid())\n node_type_name = test_node.get_type_name()\n self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), 1)\n self.assertTrue(test_node.get_attribute_exists(\"inputs:edge\"))\n\n input_attr = test_node.get_attribute(\"inputs:edge\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values([], actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:edge attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:edgesRestLengths\"))\n\n input_attr = test_node.get_attribute(\"inputs:edgesRestLengths\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values([], actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - 
inputs:edgesRestLengths attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:elem\"))\n\n input_attr = test_node.get_attribute(\"inputs:elem\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values([], actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:elem attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:gravity\"))\n\n input_attr = test_node.get_attribute(\"inputs:gravity\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values([0.0, -9.8, 0.0], actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:gravity attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:ground\"))\n\n input_attr = test_node.get_attribute(\"inputs:ground\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values(-100.0, actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:ground attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:inverseMasses\"))\n\n input_attr = test_node.get_attribute(\"inputs:inverseMasses\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values([], actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:inverseMasses attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:ks_distance\"))\n\n input_attr = test_node.get_attribute(\"inputs:ks_distance\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values(1.0, actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:ks_distance attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:ks_volume\"))\n\n input_attr = test_node.get_attribute(\"inputs:ks_volume\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values(1.0, actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:ks_volume attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:num_substeps\"))\n\n input_attr = test_node.get_attribute(\"inputs:num_substeps\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values(8, actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:num_substeps attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:points\"))\n\n input_attr = test_node.get_attribute(\"inputs:points\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values([], actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:points attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:sim_constraints\"))\n\n input_attr = test_node.get_attribute(\"inputs:sim_constraints\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values(1, actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:sim_constraints attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:tetrahedronsRestVolumes\"))\n\n input_attr = test_node.get_attribute(\"inputs:tetrahedronsRestVolumes\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values([], actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:tetrahedronsRestVolumes attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:velocities\"))\n\n input_attr = test_node.get_attribute(\"inputs:velocities\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values([], actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - 
inputs:velocities attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:velocity_dampening\"))\n\n input_attr = test_node.get_attribute(\"inputs:velocity_dampening\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values(0.1, actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:velocity_dampening attribute value error\")\n"},"size":{"kind":"number","value":5839,"string":"5,839"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":60.473683573961225,"string":"60.473684"},"max_line_length":{"kind":"number","value":150,"string":"150"},"alphanum_fraction":{"kind":"number","value":0.713649597411603,"string":"0.71365"}}},{"rowIdx":9472,"cells":{"file_path":{"kind":"string","value":"mnaskret/omni-tetGen/mnresearch/tetgen/ogn/tests/__init__.py"},"content":{"kind":"string","value":"\"\"\"====== GENERATED BY omni.graph.tools - DO NOT EDIT ======\"\"\"\nimport omni.graph.tools as ogt\nogt.import_tests_in_directory(__file__, __name__)\n"},"size":{"kind":"number","value":145,"string":"145"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":35.49999112500222,"string":"35.499991"},"max_line_length":{"kind":"number","value":63,"string":"63"},"alphanum_fraction":{"kind":"number","value":0.6344827542449465,"string":"0.634483"}}},{"rowIdx":9473,"cells":{"file_path":{"kind":"string","value":"mnaskret/omni-tetGen/mnresearch/tetgen/nodes/PBDBasicGravity.py"},"content":{"kind":"string","value":"\"\"\"\nThis is the implementation of the OGN node defined in OgnNewNode.ogn\n\"\"\"\n\n# Array or tuple values are accessed as numpy arrays so you probably need this import\nimport math\n\nimport numpy as np\nimport warp as wp\n\nimport omni.timeline\n\nfrom pxr import Usd, UsdGeom, Gf, Sdf\n\n@wp.kernel\ndef boundsKer(predictedPositions: wp.array(dtype=wp.vec3),\n groundLevel: float):\n \n tid = wp.tid()\n\n x = predictedPositions[tid]\n\n if(x[1] < groundLevel):\n predictedPositions[tid] = wp.vec3(x[0], groundLevel, x[2])\n\n@wp.kernel\ndef PBDStepKer(positions: wp.array(dtype=wp.vec3),\n predictedPositions: wp.array(dtype=wp.vec3),\n velocities: wp.array(dtype=wp.vec3),\n dT: float):\n \n tid = wp.tid()\n\n x = positions[tid]\n xPred = predictedPositions[tid]\n\n v = (xPred - x)*(1.0/dT)\n x = xPred\n\n positions[tid] = x\n velocities[tid] = v\n\n@wp.kernel\ndef gravityKer(positions: wp.array(dtype=wp.vec3),\n predictedPositions: wp.array(dtype=wp.vec3),\n velocities: wp.array(dtype=wp.vec3),\n gravityConstant: wp.vec3,\n velocityDampening: float,\n dt: float):\n \n tid = wp.tid()\n\n x = positions[tid]\n v = velocities[tid]\n\n velocityDampening = 1.0 - velocityDampening\n\n v = v + gravityConstant*dt*velocityDampening\n\n xPred = x + v*dt\n\n predictedPositions[tid] = xPred\n\n@wp.kernel\ndef distanceConstraints(predictedPositions: wp.array(dtype=wp.vec3),\n dP: wp.array(dtype=wp.vec3),\n constraintsNumber: wp.array(dtype=int),\n edgesA: wp.array(dtype=int),\n edgesB: wp.array(dtype=int),\n edgesRestLengths: wp.array(dtype=float),\n inverseMasses: wp.array(dtype=float),\n kS: float):\n \n tid = wp.tid()\n\n edgeIndexA = edgesA[tid]\n edgeIndexB = edgesB[tid]\n\n edgePositionA = predictedPositions[edgeIndexA]\n edgePositionB = predictedPositions[edgeIndexB]\n \n edgeRestLength = edgesRestLengths[tid]\n\n dir = edgePositionA - edgePositionB\n len = wp.length(dir)\n\n inverseMass = inverseMasses[edgeIndexA] + inverseMasses[edgeIndexB]\n\n edgeDP = (len-edgeRestLength) * wp.normalize(dir) * 
kS / inverseMass\n \n wp.atomic_sub(dP, edgeIndexA, edgeDP)\n wp.atomic_add(dP, edgeIndexB, edgeDP)\n\n wp.atomic_add(constraintsNumber, edgeIndexA, 1)\n wp.atomic_add(constraintsNumber, edgeIndexB, 1)\n\n@wp.kernel\ndef volumeConstraints(predictedPositions: wp.array(dtype=wp.vec3),\n dP: wp.array(dtype=wp.vec3),\n constraintsNumber: wp.array(dtype=int),\n tetrahedronsA: wp.array(dtype=int),\n tetrahedronsB: wp.array(dtype=int),\n tetrahedronsC: wp.array(dtype=int),\n tetrahedronsD: wp.array(dtype=int),\n tetrahedronsRestVolumes: wp.array(dtype=float),\n inverseMasses: wp.array(dtype=float),\n kS: float):\n \n tid = wp.tid()\n\n tetrahedronIndexA = tetrahedronsA[tid]\n tetrahedronIndexB = tetrahedronsB[tid]\n tetrahedronIndexC = tetrahedronsC[tid]\n tetrahedronIndexD = tetrahedronsD[tid]\n\n tetrahedronPositionA = predictedPositions[tetrahedronIndexA]\n tetrahedronPositionB = predictedPositions[tetrahedronIndexB]\n tetrahedronPositionC = predictedPositions[tetrahedronIndexC]\n tetrahedronPositionD = predictedPositions[tetrahedronIndexD]\n \n tetrahedronRestVolume = tetrahedronsRestVolumes[tid]\n\n p1 = tetrahedronPositionB - tetrahedronPositionA\n p2 = tetrahedronPositionC - tetrahedronPositionA\n p3 = tetrahedronPositionD - tetrahedronPositionA\n\n q2 = wp.cross(p3, p1)\n q1 = wp.cross(p2, p3)\n q3 = wp.cross(p1, p2)\n q0 = - q1 - q2 - q3\n\n mA = inverseMasses[tetrahedronIndexA]\n mB = inverseMasses[tetrahedronIndexB]\n mC = inverseMasses[tetrahedronIndexC]\n mD = inverseMasses[tetrahedronIndexD]\n\n volume = wp.dot(wp.cross(p1, p2), p3) / 6.0\n\n lambd = mA * wp.dot(q0, q0) + mB * wp.dot(q1, q1) + mC * wp.dot(q2, q2) + mD * wp.dot(q3, q3)\n\n lambd = kS * (volume - tetrahedronRestVolume) / lambd\n\n wp.atomic_sub(dP, tetrahedronIndexA, q0 * lambd * mA)\n wp.atomic_sub(dP, tetrahedronIndexB, q1 * lambd * mB)\n wp.atomic_sub(dP, tetrahedronIndexC, q2 * lambd * mC)\n wp.atomic_sub(dP, tetrahedronIndexD, q3 * lambd * mD)\n\n wp.atomic_add(constraintsNumber, tetrahedronIndexA, 1)\n wp.atomic_add(constraintsNumber, tetrahedronIndexB, 1)\n wp.atomic_add(constraintsNumber, tetrahedronIndexC, 1)\n wp.atomic_add(constraintsNumber, tetrahedronIndexD, 1)\n\n@wp.kernel\ndef applyConstraints(predictedPositions: wp.array(dtype=wp.vec3),\n dP: wp.array(dtype=wp.vec3),\n constraintsNumber: wp.array(dtype=int)):\n \n tid = wp.tid()\n\n if(constraintsNumber[tid] > 0):\n tmpDP = dP[tid]\n N = float(constraintsNumber[tid])\n DP = wp.vec3(tmpDP[0]/N, tmpDP[1]/N, tmpDP[2]/N)\n predictedPositions[tid] = predictedPositions[tid] + DP\n\n dP[tid] = wp.vec3(0.0, 0.0, 0.0)\n constraintsNumber[tid] = 0\n\nclass PBDBasicGravity:\n\n @staticmethod\n def compute(db) -> bool:\n\n timeline = omni.timeline.get_timeline_interface()\n device = \"cuda\"\n\n # # reset on stop\n # if (timeline.is_stopped()):\n # context.reset()\n\n # initialization\n if (timeline.is_playing()):\n\n with wp.ScopedCudaGuard():\n \n gravity = db.inputs.gravity\n velocity_dampening = db.inputs.velocity_dampening\n ground = db.inputs.ground\n kSDistance = db.inputs.ks_distance\n kSVolume = db.inputs.ks_volume\n\n # convert node inputs to a GPU array\n positions = wp.array(db.inputs.points, dtype=wp.vec3, device=device)\n predictedPositions = wp.zeros_like(positions)\n velocities = wp.array(db.inputs.velocities, dtype=wp.vec3, device=device)\n inverseMasses = wp.array(db.inputs.inverseMasses, dtype=float, device=device)\n\n dP = wp.zeros_like(positions)\n constraintsNumber = wp.zeros(len(dP), dtype=int, device=device)\n\n edgesSplit = 
np.hsplit(db.inputs.edge, 2)\n edgesA = wp.array(edgesSplit[0], dtype=int, device=device)\n edgesB = wp.array(edgesSplit[1], dtype=int, device=device)\n edgesRestLengths = wp.array(db.inputs.edgesRestLengths, dtype=float, device=device)\n\n tetrahedronsSplit = np.hsplit(db.inputs.elem, 4)\n tetrahedronsA = wp.array(tetrahedronsSplit[0], dtype=int, device=device)\n tetrahedronsB = wp.array(tetrahedronsSplit[1], dtype=int, device=device)\n tetrahedronsC = wp.array(tetrahedronsSplit[2], dtype=int, device=device)\n tetrahedronsD = wp.array(tetrahedronsSplit[3], dtype=int, device=device)\n tetrahedronsRestVolumes = wp.array(db.inputs.tetrahedronsRestVolumes, dtype=float, device=device)\n\n # step simulation\n with wp.ScopedTimer(\"Simulate\", active=False):\n # simulate\n sim_substeps = db.inputs.num_substeps\n sim_constraints = db.inputs.sim_constraints\n sim_dt = (1.0/30)/sim_substeps\n\n for i in range(sim_substeps):\n\n # simulate\n wp.launch(kernel=gravityKer,\n dim=len(positions),\n inputs=[positions,\n predictedPositions,\n velocities,\n gravity,\n velocity_dampening,\n sim_dt],\n device=device)\n\n for j in range(sim_constraints):\n\n wp.launch(\n kernel=volumeConstraints,\n dim=len(tetrahedronsA),\n inputs=[predictedPositions,\n dP,\n constraintsNumber,\n tetrahedronsA,\n tetrahedronsB,\n tetrahedronsC,\n tetrahedronsD,\n tetrahedronsRestVolumes,\n inverseMasses,\n kSVolume],\n device=device)\n\n wp.launch(\n kernel=distanceConstraints,\n dim=len(edgesA),\n inputs=[predictedPositions,\n dP,\n constraintsNumber,\n edgesA,\n edgesB,\n edgesRestLengths,\n inverseMasses,\n kSDistance],\n device=device)\n\n wp.launch(\n kernel=applyConstraints,\n dim=len(positions),\n inputs=[predictedPositions,\n dP,\n constraintsNumber],\n device=device)\n\n wp.launch(kernel=boundsKer,\n dim=len(predictedPositions),\n inputs=[predictedPositions,\n ground],\n device=device)\n\n wp.launch(kernel=PBDStepKer,\n dim=len(positions),\n inputs=[positions,\n predictedPositions,\n velocities,\n sim_dt],\n device=device)\n\n # write node outputs\n db.outputs.points = positions.numpy()\n db.outputs.velocities = velocities.numpy()\n\n else:\n \n with wp.ScopedTimer(\"Write\", active=False):\n \n # timeline not playing and sim. 
not yet initialized, just pass through outputs\n db.outputs.points = db.inputs.points\n db.outputs.velocities = db.inputs.velocities"},"size":{"kind":"number","value":11017,"string":"11,017"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":36.349152419155416,"string":"36.349152"},"max_line_length":{"kind":"number","value":113,"string":"113"},"alphanum_fraction":{"kind":"number","value":0.5182899155379604,"string":"0.51829"}}},{"rowIdx":9474,"cells":{"file_path":{"kind":"string","value":"mnaskret/omni-tetGen/mnresearch/tetgen/nodes/__init__.py"},"content":{"kind":"string","value":"\n\"\"\"\nDynamically import every file in a directory tree that looks like a Python Ogn Node.\nThis includes linked directories, which is the mechanism by which nodes can be hot-reloaded from the source tree.\n\"\"\"\nimport omni.graph.core as og\nog.register_ogn_nodes(__file__, \"mnresearch.tetgen\")\n"},"size":{"kind":"number","value":290,"string":"290"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":35.37499557812556,"string":"35.374996"},"max_line_length":{"kind":"number","value":113,"string":"113"},"alphanum_fraction":{"kind":"number","value":0.7689655145897741,"string":"0.768966"}}},{"rowIdx":9475,"cells":{"file_path":{"kind":"string","value":"mnaskret/omni-tetGen/mnresearch/tetgen/tests/TestPBDBasicGravity.py"},"content":{"kind":"string","value":"import omni.kit.test\nimport omni.graph.core as og\nimport omni.graph.core.tests as ogts\nimport os\nimport carb\n\n\nclass TestOgn(ogts.test_case_class(use_schema_prims=True, allow_implicit_graph=False)):\n\n async def test_import(self):\n import mnresearch.tetgen.ogn.PBDBasicGravityDatabase\n self.assertTrue(hasattr(mnresearch.tetgen.ogn.PBDBasicGravityDatabase, \"PBDBasicGravityDatabase\"))\n\n async def test_usda(self):\n test_file_name = \"PBDBasicGravityTemplate.usda\"\n usd_path = os.path.join(os.path.dirname(__file__), \"usd\", test_file_name)\n if not os.path.exists(usd_path):\n self.assertTrue(False, f\"{usd_path} not found for loading test\")\n (result, error) = await ogts.load_test_file(usd_path)\n self.assertTrue(result, f'{error} on {usd_path}')\n test_node = og.Controller.node(\"/TestGraph/Template_mnresearch_tetgen_PBDBasicGravity\")\n self.assertTrue(test_node.is_valid())\n node_type_name = test_node.get_type_name()\n self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), 1)\n self.assertTrue(test_node.get_attribute_exists(\"inputs:edge\"))\n\n input_attr = test_node.get_attribute(\"inputs:edge\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values([], actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:edge attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:edgesRestLengths\"))\n\n input_attr = test_node.get_attribute(\"inputs:edgesRestLengths\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values([], actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:edgesRestLengths attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:elem\"))\n\n input_attr = test_node.get_attribute(\"inputs:elem\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values([], actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:elem attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:gravity\"))\n\n input_attr = test_node.get_attribute(\"inputs:gravity\")\n actual_input = 
og.Controller.get(input_attr)\n ogts.verify_values([0.0, -9.8, 0.0], actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:gravity attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:ground\"))\n\n input_attr = test_node.get_attribute(\"inputs:ground\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values(-100.0, actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:ground attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:inverseMasses\"))\n\n input_attr = test_node.get_attribute(\"inputs:inverseMasses\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values([], actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:inverseMasses attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:ks_distance\"))\n\n input_attr = test_node.get_attribute(\"inputs:ks_distance\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values(1.0, actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:ks_distance attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:ks_volume\"))\n\n input_attr = test_node.get_attribute(\"inputs:ks_volume\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values(1.0, actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:ks_volume attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:num_substeps\"))\n\n input_attr = test_node.get_attribute(\"inputs:num_substeps\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values(8, actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:num_substeps attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:points\"))\n\n input_attr = test_node.get_attribute(\"inputs:points\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values([], actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:points attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:sim_constraints\"))\n\n input_attr = test_node.get_attribute(\"inputs:sim_constraints\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values(1, actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:sim_constraints attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:tetrahedronsRestVolumes\"))\n\n input_attr = test_node.get_attribute(\"inputs:tetrahedronsRestVolumes\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values([], actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:tetrahedronsRestVolumes attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:velocities\"))\n\n input_attr = test_node.get_attribute(\"inputs:velocities\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values([], actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:velocities attribute value error\")\n self.assertTrue(test_node.get_attribute_exists(\"inputs:velocity_dampening\"))\n\n input_attr = test_node.get_attribute(\"inputs:velocity_dampening\")\n actual_input = og.Controller.get(input_attr)\n ogts.verify_values(0.1, actual_input, \"mnresearch.tetgen.PBDBasicGravity USD load test - inputs:velocity_dampening attribute value 
error\")\n"},"size":{"kind":"number","value":5839,"string":"5,839"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":60.473683573961225,"string":"60.473684"},"max_line_length":{"kind":"number","value":150,"string":"150"},"alphanum_fraction":{"kind":"number","value":0.713649597411603,"string":"0.71365"}}},{"rowIdx":9476,"cells":{"file_path":{"kind":"string","value":"mnaskret/omni-tetGen/mnresearch/tetgen/tests/__init__.py"},"content":{"kind":"string","value":"\"\"\"====== GENERATED BY omni.graph.tools - DO NOT EDIT ======\"\"\"\nimport omni.graph.tools as ogt\nogt.import_tests_in_directory(__file__, __name__)\n"},"size":{"kind":"number","value":145,"string":"145"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":35.49999112500222,"string":"35.499991"},"max_line_length":{"kind":"number","value":63,"string":"63"},"alphanum_fraction":{"kind":"number","value":0.6344827542449465,"string":"0.634483"}}},{"rowIdx":9477,"cells":{"file_path":{"kind":"string","value":"Kim2091/RTXRemixTools/README.md"},"content":{"kind":"string","value":"# RTXRemixTools\nThese are some tools I've made that are intended for use with Nvidia's RTX Remix. Right now I have 3:\n\n* **MagicUSDA** - Allows you to generate .usda files based on your gameReadyAssets folder\n\n* **LightAdjuster** - A simple script that allows you to adjust light intensity and color temperature in a specified .usda file\n\n* **RemixMeshConvert** - This script will convert meshes to be (more) compatible with Remix\n\nThese should hopefully help with setting up mods for Remix quickly and easily.\n"},"size":{"kind":"number","value":511,"string":"511"},"lang":{"kind":"string","value":"Markdown"},"avg_line_length":{"kind":"number","value":45.545450404959055,"string":"45.54545"},"max_line_length":{"kind":"number","value":127,"string":"127"},"alphanum_fraction":{"kind":"number","value":0.7729941276458041,"string":"0.772994"}}},{"rowIdx":9478,"cells":{"file_path":{"kind":"string","value":"Kim2091/RTXRemixTools/LightAdjuster/LightAdjuster.py"},"content":{"kind":"string","value":"import argparse\n\ndef adjust_value(line, value_name, percentage, log_changes, i):\n if f'float {value_name} =' in line:\n parts = line.split('=')\n old_value = float(parts[1].strip())\n new_value = old_value * percentage\n new_line = f'{parts[0]}= {new_value}\\n'\n if log_changes:\n log_line = f'Line {i + 1}: {line.strip()} -> {new_line.strip()}'\n print(log_line)\n with open('changes.log', 'a') as log:\n log.write(log_line + '\\n')\n line = new_line\n return line, True\n return line, False\n\ndef adjust_file(file_path, start_line=1, log_changes=False, adjust_intensity=False, adjust_color_temperature=False, percentage=None):\n with open(file_path, 'r') as file:\n data = file.readlines()\n lines_changed = 0\n with open(file_path, 'w') as file:\n for i, line in enumerate(data):\n if i + 1 >= start_line:\n if adjust_intensity:\n line, changed = adjust_value(line, 'intensity', percentage, log_changes, i)\n if changed:\n lines_changed += 1\n if adjust_color_temperature:\n line, changed = adjust_value(line, 'colorTemperature', percentage, log_changes, i)\n if changed:\n lines_changed += 1\n file.write(line)\n print(f'Completed! 
{lines_changed} lines changed.')\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Adjust the intensity and/or color temperature values in a file.')\n parser.add_argument('file_path', type=str, help='The path to the file to modify.')\n parser.add_argument('-s', '--start-line', type=int, default=1, help='The line number to start modifying at.')\n parser.add_argument('-l', '--log', action='store_true', help='Whether to print a log of the changed lines.')\n parser.add_argument('-ai', '--adjust-intensity', action='store_true', help='Whether to adjust the intensity value.')\n parser.add_argument('-act', '--adjust-color-temperature', action='store_true', help='Whether to adjust the color temperature value.')\n parser.add_argument('-p', '--percentage', type=float, required=True, help='The percentage to adjust the value by.')\n args = parser.parse_args()\n adjust_file(args.file_path, args.start_line, args.log, args.adjust_intensity, args.adjust_color_temperature, args.percentage)\n"},"size":{"kind":"number","value":2440,"string":"2,440"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":52.06521625945182,"string":"52.065216"},"max_line_length":{"kind":"number","value":137,"string":"137"},"alphanum_fraction":{"kind":"number","value":0.6098360653238377,"string":"0.609836"}}},{"rowIdx":9479,"cells":{"file_path":{"kind":"string","value":"Kim2091/RTXRemixTools/LightAdjuster/README.md"},"content":{"kind":"string","value":"# **Remix Light Adjuster**\n\n*Written with the assistance of Bing*\n\nThis script adjusts the intensity and/or color temperature values in a file. \n\n$\\color{#f7d26a}{\\textsf{Please back up your usda files before running!}}$\n\n## Usage\n\nTo use this script, run the following command:\n\n`python LightAdjuster.py file_path`\n\nwhere `file_path` is the path to the .usda file to modify.\n\nThere are several additional options that can be used with this script:\n\n* `-s` or `--start-line` - This option allows you to specify the line number to start modifying at. The default value is 1.\n* `-l` or `--log` - This option enables logging of the changed lines. If this option is used, a log of the changed lines will be printed to the console and written to a file named `changes.log`.\n* `-p` or `--percentage` - This option specifies the percentage to adjust the value by. This option is required.\n* `-ai` or `--adjust-intensity` - This option enables adjustment of the intensity value using `-p`.\n* `-act` or `--adjust-color-temperature` - This option enables adjustment of the color temperature value using `-p`.\n\nFor example, to adjust the intensity value in a file named `data.txt`, starting at line 5, and logging the changes, you would run the following command:\n\n`python adjust_file.py data.txt -s 5 -l -ai -p 0.5`\n\nThis would adjust the intensity value in all lines containing `float intensity =`, starting at line 5, by multiplying it by 0.5. A log of the changed lines would be printed to the console and written to a file named `changes.log`.\n\n## Description\n\nThis script reads the specified file and modifies lines that contain either `float intensity =` or `float colorTemperature =`, depending on which value is being adjusted. The value is multiplied by the specified percentage and the line is updated with the new value. 
If logging is enabled, a log of the changed lines is printed to the console and written to a file named `changes.log`.\n\nAfter all lines have been processed, the script prints a message indicating how many lines were changed.\n"},"size":{"kind":"number","value":2047,"string":"2,047"},"lang":{"kind":"string","value":"Markdown"},"avg_line_length":{"kind":"number","value":55.8888873364198,"string":"55.888887"},"max_line_length":{"kind":"number","value":385,"string":"385"},"alphanum_fraction":{"kind":"number","value":0.755251587320346,"string":"0.755252"}}},{"rowIdx":9480,"cells":{"file_path":{"kind":"string","value":"Kim2091/RTXRemixTools/MagicUSDA/README.md"},"content":{"kind":"string","value":"# Remix USDA Generator\n*Written with the assistance of Bing and ChatGPT*\n\n$\\color{#f7d26a}{\\textsf{Please back up your usda files to a separate folder before running!}}$\n\nThis is a script to generate `.usda` files from your gameReadyAssets folder. It detects any of these map types in your folder:\n- emissive\n- normal\n- metallic\n- rough\n\n## Usage\nHow to use this script:\n`python MagicUSDA.py -d path\\to\\gameReadyAssets`\n\nThere are some additional functions:\n\n* `-o` - Change the output usda file names.\n* `-m` - Split the output USDA files into separate entries for each map type (e.g. mod_emissive.usda, mod_metallic.usda). Works with `-o` to change the base file name.\n* `-a` - Add sublayers made with `-m` to the mod.usda file. Not compatible with custom files specified by `-o`, will only modify mod.usda. Works with `-m` and `-o`.\n* `-g` - Toggle generating hashes for file names before the suffix. Useful for files with generic names like test.dds. Diffuse textures must be identical to Remix dumps.\n* `-s` - Change between the AperturePBR_Opacity and AperturePBR_Translucent material shader types. Using this, you can generate separate .usda files for normal or translucent objects easily\n* `-r` _**Currently broken**_ - Specify a separate folder to use as a reference for generating diffuse texture hashes. Searches for files in the reference directory based on file names from the base directory. If not provided, uses the main directory to generate hashes. Useful with folders like captures or game texture rips.\n\nThe `.usda` files generated by this script serve to replace textures in your Remix games, allowing you to swap out textures and utilize additional map types to enhance the game's visuals.\n\nThis script is intended to be used with original diffuse textures, which are required for it to function correctly. It generates a `mod.usda` file for use in your game through Remix. It was designed with [chaiNNer](https://chainner.app/) in mind, however you can use this with any textures you've created. 
Be aware that this script will overwrite any pre-existing `mod.usda` files in your directory!\n"},"size":{"kind":"number","value":2113,"string":"2,113"},"lang":{"kind":"string","value":"Markdown"},"avg_line_length":{"kind":"number","value":74.49999733928581,"string":"74.499997"},"max_line_length":{"kind":"number","value":399,"string":"399"},"alphanum_fraction":{"kind":"number","value":0.7690487454950077,"string":"0.769049"}}},{"rowIdx":9481,"cells":{"file_path":{"kind":"string","value":"Kim2091/RTXRemixTools/MagicUSDA/MagicUSDA.py"},"content":{"kind":"string","value":"import os\nimport argparse\nimport xxhash\nfrom pxr import Usd, UsdGeom, UsdShade, Sdf\n\nsuffixes = [\"_normal\", \"_emissive\", \"_metallic\", \"_rough\"]\n\n\ndef generate_hashes(file_path) -> str:\n # Read the file and extract the raw data. Thanks @BlueAmulet!\n with open(file_path, \"rb\") as file:\n data = file.read(128)\n\n dwHeight = int.from_bytes(data[12:16], \"little\")\n dwWidth = int.from_bytes(data[16:20], \"little\")\n pfFlags = int.from_bytes(data[80:84], \"little\")\n pfFourCC = data[84:88]\n bitCount = int.from_bytes(data[88:92], \"little\")\n\n mipsize = dwWidth * dwHeight\n if pfFlags & 0x4: # DDPF_FOURCC\n if pfFourCC == b\"DXT1\": # DXT1 is 4bpp\n mipsize //= 2\n elif pfFlags & 0x20242: # DDPF_ALPHA | DDPF_RGB | DDPF_YUV | DDPF_LUMINANCE\n mipsize = mipsize * bitCount // 8\n\n # Read the required portion of the file for hash calculation\n with open(file_path, \"rb\") as file:\n file.seek(128) # Move the file pointer to the appropriate position\n data = file.read(mipsize)\n\n hash_value = xxhash.xxh3_64(data).hexdigest()\n\n return hash_value.upper()\n\n\ndef write_usda_file(args, file_list, suffix=None) -> [list, list]:\n created_files = []\n modified_files = []\n game_ready_assets_path = os.path.join(args.directory)\n\n # Check if there are any texture files with the specified suffix\n if suffix:\n has_suffix_files = False\n for file_name in file_list:\n if file_name.endswith(f\"{suffix}.dds\"):\n has_suffix_files = True\n break\n if not has_suffix_files:\n # return a blank set\n return [created_files, modified_files]\n\n usda_file_name = f'{args.output}{suffix if suffix else \"\"}.usda'\n usda_file_path = os.path.join(game_ready_assets_path, usda_file_name)\n\n if os.path.exists(usda_file_path):\n modified_files.append(usda_file_path)\n else:\n created_files.append(usda_file_path)\n\n targets = {}\n\n reference_directory = args.reference_directory if args.reference_directory else args.directory\n \n for file_name in file_list:\n if file_name.endswith(\".dds\"):\n # Extract only the file name from the absolute path\n name = os.path.basename(file_name)\n name, ext = os.path.splitext(name)\n if \"_\" not in name or name.endswith(\"_diffuse\") or name.endswith(\"_albedo\"):\n # Check if the generate_hashes argument is specified\n if args.generate_hashes:\n key = name.split(\"_\")[0] # Use the prefix of the diffuse file name as the key\n hash_value = generate_hashes(os.path.join(reference_directory, file_name)) # Generate hash for the diffuse file\n else:\n key = os.path.basename(name)\n hash_value = key # Use the original name as the hash value\n # Check if the key contains a hash or ends with _diffuse or _albedo\n if not (key.isupper() and len(key) == 16) and not (key.endswith(\"_diffuse\") or key.endswith(\"_albedo\")):\n continue\n # Remove the _diffuse or _albedo suffix from the key and hash_value\n key = key.replace(\"_diffuse\", \"\").replace(\"_albedo\", \"\")\n hash_value = 
hash_value.replace(\"_diffuse\", \"\").replace(\"_albedo\", \"\")\n # Get the relative path from the game ready assets path to the texture file\n rel_file_path = os.path.relpath(file_name, args.directory)\n targets[key] = (rel_file_path, hash_value)\n\n # Create a new stage\n stage = Usd.Stage.CreateNew(usda_file_path)\n\n # Modify the existing RootNode prim\n root_node_prim = stage.OverridePrim(\"/RootNode\")\n\n # Add a Looks scope as a child of the RootNode prim\n looks_scope = UsdGeom.Scope.Define(stage, \"/RootNode/Looks\")\n \n added_targets = set()\n for value, (rel_file_path, hash_value) in targets.items():\n # Check if there is a corresponding texture file for the specified suffix\n if suffix and not any(\n file_name.endswith(f\"{value}{suffix}.dds\") for file_name in file_list\n ): continue\n if value in added_targets:\n continue\n else:\n added_targets.add(value)\n print(f\"Adding texture {rel_file_path} with hash: {hash_value}\")\n\n # Add a material prim as a child of the Looks scope\n material_prim = UsdShade.Material.Define(\n stage, f\"/RootNode/Looks/mat_{hash_value.upper()}\"\n )\n material_prim.GetPrim().GetReferences().SetReferences([])\n\n # Set the shader attributes\n shader_prim = UsdShade.Shader.Define(\n stage, f\"/RootNode/Looks/mat_{hash_value.upper()}/Shader\"\n )\n shader_prim.GetPrim().CreateAttribute(\"info:mdl:sourceAsset\", Sdf.ValueTypeNames.Asset).Set(\n f\"{args.shader_type}.mdl\"\n )\n shader_prim.GetPrim().CreateAttribute(\"info:implementationSource\", Sdf.ValueTypeNames.Token).Set(\n \"sourceAsset\"\n )\n shader_prim.GetPrim().CreateAttribute(\"info:mdl:sourceAsset:subIdentifier\", Sdf.ValueTypeNames.Token).Set(\n f\"{args.shader_type}\"\n )\n\n shader_output = shader_prim.CreateOutput(\"output\", Sdf.ValueTypeNames.Token)\n\n if not suffix or suffix == \"_diffuse\" or suffix == \"_albedo\":\n diffuse_texture = shader_prim.CreateInput(\n \"diffuse_texture\", Sdf.ValueTypeNames.Asset\n )\n # Use the dynamically generated relative path for the diffuse texture\n diffuse_texture.Set(f\".\\{rel_file_path}\")\n \n # Process each type of texture\n if not suffix or suffix == \"_emissive\":\n emissive_file_name = f\"{value}_emissive.dds\"\n # print(f\"Emissive File Name: {emissive_file_name in file_list}\")\n # print(file_list)\n if any(file_path.endswith(emissive_file_name) for file_path in file_list):\n emissive_mask_texture = shader_prim.CreateInput(\n \"emissive_mask_texture\", Sdf.ValueTypeNames.Asset\n )\n # Use the dynamically generated relative path for the emissive texture\n emissive_rel_file_path = os.path.relpath(os.path.join(os.path.dirname(file_name), emissive_file_name), args.directory)\n emissive_mask_texture.Set(f\".\\{emissive_rel_file_path}\")\n enable_emission = shader_prim.CreateInput(\n \"enable_emission\", Sdf.ValueTypeNames.Bool\n )\n enable_emission.Set(True)\n emissive_intensity = shader_prim.CreateInput(\n \"emissive_intensity\", Sdf.ValueTypeNames.Float\n )\n emissive_intensity.Set(5)\n\n if not suffix or suffix == \"_metallic\":\n metallic_file_name = f\"{value}_metallic.dds\"\n if any(file_path.endswith(metallic_file_name) for file_path in file_list):\n\n metallic_texture = shader_prim.CreateInput(\n \"metallic_texture\", Sdf.ValueTypeNames.Asset\n )\n # Use the dynamically generated relative path for the metallic texture\n metallic_rel_file_path = os.path.relpath(os.path.join(os.path.dirname(file_name), metallic_file_name), args.directory)\n metallic_texture.Set(f\".\\{metallic_rel_file_path}\")\n\n if not suffix or suffix == 
\"_normal\":\n normal_file_name = f\"{value}_normal.dds\"\n if any(file_path.endswith(normal_file_name) for file_path in file_list):\n normalmap_texture = shader_prim.CreateInput(\n \"normal_texture\", Sdf.ValueTypeNames.Asset\n )\n # Use the dynamically generated relative path for the normal texture\n normal_rel_file_path = os.path.relpath(os.path.join(os.path.dirname(file_name), normal_file_name), args.directory)\n normalmap_texture.Set(f\".\\{normal_rel_file_path}\")\n\n if not suffix or suffix == \"_rough\":\n roughness_file_name = f\"{value}_rough.dds\"\n if any(file_path.endswith(roughness_file_name) for file_path in file_list):\n reflectionroughness_texture = shader_prim.CreateInput(\n \"reflectionroughness_texture\", Sdf.ValueTypeNames.Asset\n )\n # Use the dynamically generated relative path for the roughness texture\n roughness_rel_file_path = os.path.relpath(os.path.join(os.path.dirname(file_name), roughness_file_name), args.directory)\n reflectionroughness_texture.Set(f\".\\{roughness_rel_file_path}\")\n\n # Connect shader output to material inputs\n material_prim.CreateInput(\n \"mdl:displacement\", Sdf.ValueTypeNames.Token\n ).ConnectToSource(shader_output)\n material_prim.CreateInput(\n \"mdl:surface\", Sdf.ValueTypeNames.Token\n ).ConnectToSource(shader_output)\n material_prim.CreateInput(\n \"mdl:volume\", Sdf.ValueTypeNames.Token\n ).ConnectToSource(shader_output)\n\n # Save the stage\n stage.Save()\n \n return [modified_files, created_files]\n\n\ndef add_sublayers(args, file_list) -> list:\n modified_files = []\n game_ready_assets_path = os.path.join(args.directory)\n mod_file_path = os.path.join(game_ready_assets_path, \"mod.usda\")\n if os.path.exists(mod_file_path):\n modified_files.append(mod_file_path)\n\n # Open the existing stage\n stage = Usd.Stage.Open(mod_file_path)\n\n # Get the existing sublayers\n existing_sublayers = list(stage.GetRootLayer().subLayerPaths)\n\n # Create a set of existing sublayer file names\n existing_sublayer_files = {\n os.path.basename(sublayer_path) for sublayer_path in existing_sublayers\n }\n\n # Add new sublayers\n new_sublayers = [\n f\"./{args.output}{suffix}.usda\"\n for suffix in suffixes\n if f\"{args.output}{suffix}.usda\" not in existing_sublayer_files\n and any(\n os.path.basename(file_path) == f\"{args.output}{suffix}.usda\"\n for file_path in file_list\n )\n ]\n stage.GetRootLayer().subLayerPaths = (existing_sublayers + new_sublayers)\n\n # Save the stage\n stage.Save()\n\n return modified_files\n\n\nif __name__ == \"__main__\":\n # ARGUMENT BLOCK\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"--directory\", required=True, help=\"Path to directory\")\n parser.add_argument(\"-o\", \"--output\", default=\"mod\", help=\"Output file name\")\n parser.add_argument(\"-g\", \"--generate-hashes\", action=\"store_true\", help=\"Generates hashes for file names before the suffix\")\n parser.add_argument(\"-m\", \"--multiple-files\", action=\"store_true\", help=\"Save multiple .usda files, one for each suffix type (except for diffuse)\")\n parser.add_argument(\"-a\", \"--add-sublayers\", action=\"store_true\", help=\"Add sublayers made with -m to the mod.usda file. 
This argument only modifies the mod.usda file and does not affect any custom USDA file specified by the -o argument.\")\n parser.add_argument(\"-s\", \"--shader-type\", default=\"AperturePBR_Opacity\", choices=[\"AperturePBR_Opacity\", \"AperturePBR_Translucent\"], help=\"Shader type\")\n parser.add_argument(\"-r\", \"--reference-directory\", help=\"Path to reference directory for diffuse texture hashes\")\n args = parser.parse_args()\n \n # Check target processing directory before use\n if not os.path.isdir(args.directory):\n raise FileNotFoundError(\"Specified processing directory (-d) is invalid\")\n \n # Recursively scan folders\n file_list = []\n for root, dirs, files in os.walk(args.directory):\n for file in files:\n file_list.append(os.path.join(root, file))\n created_files = []\n modified_files = []\n \n # Process sublayer additions\n print(f\"Add Sublayers: {args.add_sublayers}\")\n if args.add_sublayers:\n modified_files.extend(add_sublayers(args, file_list))\n \n # Generate unique USDA files per suffix type (except diffuse)\n if args.multiple_files:\n for suffix in suffixes:\n m, c = write_usda_file(args, file_list, suffix)\n modified_files.extend(m), created_files.extend(c)\n else: # Generate a single USDA file for all suffixes\n m, c = write_usda_file(args, file_list)\n modified_files.extend(m), created_files.extend(c)\n \n # Complete\n print(\"Finished!\")\n print(\"Created files:\")\n for file in created_files:\n print(f\" - {file}\")\n print(\"Modified files:\")\n for file in modified_files:\n print(f\" - {file}\")\n"},"size":{"kind":"number","value":12785,"string":"12,785"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":43.242214383244935,"string":"43.242214"},"max_line_length":{"kind":"number","value":243,"string":"243"},"alphanum_fraction":{"kind":"number","value":0.6157215486417112,"string":"0.615722"}}},{"rowIdx":9482,"cells":{"file_path":{"kind":"string","value":"Kim2091/RTXRemixTools/RemixMeshConvert/RemixMeshConvert.py"},"content":{"kind":"string","value":"import argparse\nimport logging\nimport os\nimport shutil\nimport sys\n\nfrom pxr import Usd, UsdGeom, Gf, Sdf\n\nALIASES = {\n \"primvars:UVMap\": (\"primvars:st\", Sdf.ValueTypeNames.Float2Array),\n \"primvars:UVChannel_1\": (\"primvars:st1\", Sdf.ValueTypeNames.Float2Array),\n \"primvars:map1\": (\"primvars:st1\", Sdf.ValueTypeNames.Float2Array),\n # Add more aliases here\n}\n\n\ndef convert_face_varying_to_vertex_interpolation(usd_file_path):\n stage = Usd.Stage.Open(usd_file_path)\n mesh_prims = [prim for prim in stage.TraverseAll() if prim.IsA(UsdGeom.Mesh)]\n for prim in mesh_prims:\n mesh = UsdGeom.Mesh(prim)\n indices = prim.GetAttribute(\"faceVertexIndices\")\n points = prim.GetAttribute(\"points\")\n \n if not indices or not points:\n continue # Skip if the required attributes are missing\n \n points_arr = points.Get()\n\n modified_points = [points_arr[i] for i in indices.Get()]\n points.Set(modified_points)\n\n indices.Set([i for i in range(len(indices.Get()))])\n\n mesh.SetNormalsInterpolation(UsdGeom.Tokens.vertex)\n primvar_api = UsdGeom.PrimvarsAPI(prim)\n for var in primvar_api.GetPrimvars():\n if var.GetInterpolation() == UsdGeom.Tokens.faceVarying:\n var.SetInterpolation(UsdGeom.Tokens.vertex)\n\n # Replace aliases with \"float2[] primvars:st\"\n if var.GetName() in ALIASES:\n new_name, new_type_name = ALIASES[var.GetName()]\n new_var = primvar_api.GetPrimvar(new_name)\n if new_var:\n new_var.Set(var.Get())\n else:\n new_var = 
primvar_api.CreatePrimvar(new_name, new_type_name)\n new_var.Set(var.Get())\n new_var.SetInterpolation(UsdGeom.Tokens.vertex) # Set interpolation to vertex\n \n primvar_api.RemovePrimvar(var.GetBaseName())\n\n return stage\n\n\ndef process_folder(input_folder, output_folder, output_extension=None):\n for file_name in os.listdir(input_folder):\n input_file = os.path.join(input_folder, file_name)\n if output_extension:\n file_name = os.path.splitext(file_name)[0] + '.' + output_extension\n output_file = os.path.join(output_folder, file_name)\n\n if not os.path.isfile(input_file):\n continue\n\n shutil.copy(input_file, output_file) # Make a copy of the input file and rename it to the output file\n stage = convert_face_varying_to_vertex_interpolation(output_file)\n stage.Save() # Modify the output file in place\n logging.info(f\"Processed file: {input_file} -> {output_file}\")\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Convert USD file formats and interpolation of meshes.')\n parser.add_argument('input', type=str, help='Input file or folder path')\n parser.add_argument('output', type=str, help='Output file or folder path')\n parser.add_argument('-f', '--format', type=str, choices=['usd', 'usda'], help='Output file format (usd or usda)')\n args = parser.parse_args()\n\n input_path = args.input\n output_path = args.output\n output_extension = args.format\n\n logging.basicConfig(level=logging.INFO, format='%(message)s')\n\n if os.path.isdir(input_path):\n process_folder(input_path, output_path, output_extension)\n else:\n if output_extension:\n output_path = os.path.splitext(output_path)[0] + '.' + output_extension\n shutil.copy(input_path, output_path) # Make a copy of the input file and rename it to the output file\n stage = convert_face_varying_to_vertex_interpolation(output_path)\n stage.Save() # Modify the output file in place\n logging.info(f\"Processed file: {input_path} -> {output_path}\")\n\n\nif __name__ == '__main__':\n main()\n"},"size":{"kind":"number","value":3853,"string":"3,853"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":37.92929254616876,"string":"37.929293"},"max_line_length":{"kind":"number","value":117,"string":"117"},"alphanum_fraction":{"kind":"number","value":0.6379444586976526,"string":"0.637944"}}},{"rowIdx":9483,"cells":{"file_path":{"kind":"string","value":"Kim2091/RTXRemixTools/RemixMeshConvert/README.md"},"content":{"kind":"string","value":"## RemixMeshConvert\n$\\color{#f7d26a}{\\textsf{Use this instead. It integrates directly into Omniverse:}}$ https://github.com/Ekozmaster/NvidiaOmniverseRTXRemixTools\n\n\n\n\n\n\n Old description:\n \n*Based on a script originally written by E-man*\n\n$\\color{#f7d26a}{\\textsf{Please back up your USD and USDA files before running!}}$\n\n**How to use this script:**\n\nTo convert a single file:\n\n`python RemixMeshConvert.py [input.usda] [output.usda]`\n\nTo batch convert a folder:\n\n`python RemixMeshConvert.py path\\to\\input\\folder path\\to\\output\\folder -f [usd or usda]`\n\n**Arguments:**\n\n`-f` `--output-format` - This controls the output format when using the script in **batch** mode\n\n**Description:**\n\nThis script takes USD files as input, makes a copy named as the output, converts the interpolation of all meshes in the given USD file from face-varying to vertex, and finally saves the modified stages to the new USD files. 
It can process a single file or a folder of files, and also includes a dictionary of aliases for replacing specific primvar names with `float2[] primvars:st1`.\n\n**For your final exports to use in-game, please save as USD! USDA files are very inefficient in comparison**\n\nPlease refer to `requirements.txt` for necessary Python libraries.\n\n"},"size":{"kind":"number","value":1289,"string":"1,289"},"lang":{"kind":"string","value":"Markdown"},"avg_line_length":{"kind":"number","value":33.86486394959827,"string":"33.864864"},"max_line_length":{"kind":"number","value":383,"string":"383"},"alphanum_fraction":{"kind":"number","value":0.7579519001102002,"string":"0.757952"}}},{"rowIdx":9484,"cells":{"file_path":{"kind":"string","value":"Kim2091/RTXRemixTools/RemixMeshConvert/For USD Composer/RemixMeshConvert_OV.py"},"content":{"kind":"string","value":"from pxr import Usd, UsdGeom, Sdf\n\nALIASES = {\n \"primvars:UVMap\": (\"primvars:st\", Sdf.ValueTypeNames.Float2Array),\n \"primvars:UVChannel_1\": (\"primvars:st1\", Sdf.ValueTypeNames.Float2Array),\n \"primvars:map1\": (\"primvars:st1\", Sdf.ValueTypeNames.Float2Array),\n # Add more aliases here\n}\n\ndef convert_face_varying_to_vertex_interpolation(stage):\n mesh_prims = [prim for prim in stage.TraverseAll() if prim.IsA(UsdGeom.Mesh)]\n for prim in mesh_prims:\n mesh = UsdGeom.Mesh(prim)\n indices = prim.GetAttribute(\"faceVertexIndices\")\n points = prim.GetAttribute(\"points\")\n \n if not indices or not points:\n continue # Skip if the required attributes are missing\n \n points_arr = points.Get()\n\n modified_points = [points_arr[i] for i in indices.Get()]\n points.Set(modified_points)\n\n indices.Set([i for i in range(len(indices.Get()))])\n\n mesh.SetNormalsInterpolation(UsdGeom.Tokens.vertex)\n primvar_api = UsdGeom.PrimvarsAPI(prim)\n for var in primvar_api.GetPrimvars():\n if var.GetInterpolation() == UsdGeom.Tokens.faceVarying:\n var.SetInterpolation(UsdGeom.Tokens.vertex)\n\n # Replace aliases with \"float2[] primvars:st\"\n if var.GetName() in ALIASES:\n new_name, new_type_name = ALIASES[var.GetName()]\n new_var = primvar_api.GetPrimvar(new_name)\n if new_var:\n new_var.Set(var.Get())\n else:\n new_var = primvar_api.CreatePrimvar(new_name, new_type_name)\n new_var.Set(var.Get())\n new_var.SetInterpolation(UsdGeom.Tokens.vertex) # Set interpolation to vertex\n \n # Remove the old primvar directly from the UsdGeomPrimvar object\n var.GetAttr().Block()\n\n return stage\n\nstage = omni.usd.get_context().get_stage()\nconvert_face_varying_to_vertex_interpolation(stage)\n"},"size":{"kind":"number","value":1995,"string":"1,995"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":38.13725415417149,"string":"38.137254"},"max_line_length":{"kind":"number","value":97,"string":"97"},"alphanum_fraction":{"kind":"number","value":0.6140350874115112,"string":"0.614035"}}},{"rowIdx":9485,"cells":{"file_path":{"kind":"string","value":"Kim2091/RTXRemixTools/RemixMeshConvert/For USD Composer/README.md"},"content":{"kind":"string","value":"## RemixMeshConvert\n*Based on a script originally written by E-man*\n\n$\\color{#f7d26a}{\\textsf{Please back up your USD and USDA files before running!}}$\n\n**How to use this script:**\n* Install USD Composer: https://www.nvidia.com/en-us/omniverse/apps/create/\n* Once launched, open the Script Editor in Window > Script Editor\n* Load your mesh files by dragging it into the pane on the right\n* Run the script\n\nFor more information, look at [this 
thread](https://discord.com/channels/1028444667789967381/1096847508002590760/1123306156773879928) in the [RTX Remix Showcase server](https://discord.gg/rtxremix)\n \n\n**Description:**\n\nThe RemixMeshConvert_OV script is only for usage within Omniverse's USD Composer. If you want to process files and folders independently of Omniverse, use RemixMeshConvert in the directory above this one.\n\n**For your final exports to use in-game, please save as USD! USDA files are very inefficient in comparison**\n"},"size":{"kind":"number","value":941,"string":"941"},"lang":{"kind":"string","value":"Markdown"},"avg_line_length":{"kind":"number","value":46.09999769500011,"string":"46.099998"},"max_line_length":{"kind":"number","value":204,"string":"204"},"alphanum_fraction":{"kind":"number","value":0.7768331553912506,"string":"0.776833"}}},{"rowIdx":9486,"cells":{"file_path":{"kind":"string","value":"gigwegbe/synthetic_data_with_nvidia_replicator_and_edge_impulse/objects_position_normal_90.py"},"content":{"kind":"string","value":"import omni.replicator.core as rep\n\nwith rep.new_layer():\n\n # Load in asset\n local_path = \"/home/george/Documents/synthetic_data_with_nvidia_replicator_and_edge_impulse/\"\n TABLE_USD = f\"{local_path}/asset/Collected_EastRural_Table/EastRural_Table.usd\"\n SPOON_SMALL_USD = f\"{local_path}/asset/Collected_Spoon_Small/Spoon_Small.usd\"\n SPOON_BIG_USD = f\"{local_path}/asset/Collected_Spoon_Big/Spoon_Big.usd\"\n FORK_SMALL_USD = f\"{local_path}/asset/Collected_Fork_Small/Fork_Small.usd\"\n FORK_BIG_USD = f\"{local_path}/asset/Collected_Fork_Big/Fork_Big.usd\"\n KNIFE_USD = f\"{local_path}/asset/Collected_Knife/Knife.usd\"\n\n # Camera paramters\n cam_position = (46, 200, 25)\n cam_position2 = (46, 120, 25)\n cam_position_random = rep.distribution.uniform((0, 181, 0), (0, 300, 0))\n cam_rotation = (-90, 0, 0)\n focus_distance = 114\n focus_distance2 = 39.1\n focal_length = 27\n focal_length2 = 18.5\n f_stop = 1.8\n f_stop2 = 1.8\n focus_distance_random = rep.distribution.normal(500.0, 100)\n\n # Cultery path\n current_cultery = SPOON_SMALL_USD # Change the item here e.g KNIFE_USD\n output_path = current_cultery.split(\".\")[0].split(\"/\")[-1]\n\n def rect_lights(num=1):\n lights = rep.create.light(\n light_type=\"rect\",\n temperature=rep.distribution.normal(6500, 500),\n intensity=rep.distribution.normal(0, 5000),\n position=(45, 110, 0),\n rotation=(-90, 0, 0),\n scale=rep.distribution.uniform(50, 100),\n count=num\n )\n return lights.node\n\n def dome_lights(num=3):\n lights = rep.create.light(\n light_type=\"dome\",\n temperature=rep.distribution.normal(6500, 500),\n intensity=rep.distribution.normal(0, 1000),\n position=(45, 120, 18),\n rotation=(225, 0, 0),\n count=num\n )\n return lights.node\n\n def table():\n table = rep.create.from_usd(TABLE_USD, semantics=[('class', 'table')])\n\n with table:\n rep.modify.pose(\n position=(46, -0.0, 20),\n rotation=(0, -90, -90),\n )\n return table\n\n # Define randomizer function for CULTERY assets. 
This randomization includes placement and rotation of the assets on the surface.\n def cutlery_props(size=15):\n instances = rep.randomizer.instantiate(rep.utils.get_usd_files(\n current_cultery), size=size, mode='point_instance')\n\n with instances:\n rep.modify.pose(\n position=rep.distribution.uniform(\n (0, 76.3651, 0), (90, 76.3651, 42)),\n rotation=rep.distribution.uniform(\n (-90, -180, 0), (-90, 180, 0)),\n )\n return instances.node\n\n # Register randomization\n rep.randomizer.register(table)\n rep.randomizer.register(cutlery_props)\n rep.randomizer.register(rect_lights)\n rep.randomizer.register(dome_lights)\n\n # Multiple setup cameras and attach it to render products\n camera = rep.create.camera(focus_distance=focus_distance, focal_length=focal_length,\n position=cam_position, rotation=cam_rotation, f_stop=f_stop)\n camera2 = rep.create.camera(focus_distance=focus_distance2, focal_length=focal_length2,\n position=cam_position2, rotation=cam_rotation, f_stop=f_stop)\n\n # Will render 1024x1024 images and 512x512 images\n render_product = rep.create.render_product(camera, (1024, 1024))\n render_product2 = rep.create.render_product(camera2, (512, 512))\n\n # Initialize and attach writer\n writer = rep.WriterRegistry.get(\"BasicWriter\")\n writer.initialize(output_dir=f\"{local_path}/data/normal/{output_path}\",\n rgb=True, bounding_box_2d_tight=False, semantic_segmentation=False)\n writer.attach([render_product, render_product2])\n\n with rep.trigger.on_frame(num_frames=50):\n rep.randomizer.table()\n rep.randomizer.rect_lights(1)\n rep.randomizer.dome_lights(1)\n rep.randomizer.cutlery_props(5)\n\n # Run the simulation graph\n rep.orchestrator.run()\n"},"size":{"kind":"number","value":4170,"string":"4,170"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":37.62037002203361,"string":"37.62037"},"max_line_length":{"kind":"number","value":133,"string":"133"},"alphanum_fraction":{"kind":"number","value":0.6218225418173087,"string":"0.621823"}}},{"rowIdx":9487,"cells":{"file_path":{"kind":"string","value":"gigwegbe/synthetic_data_with_nvidia_replicator_and_edge_impulse/objects_position_random.py"},"content":{"kind":"string","value":"import omni.replicator.core as rep\n\nwith rep.new_layer():\n\n # Load in asset\n local_path = \"/home/george/Documents/synthetic_data_with_nvidia_replicator_and_edge_impulse/\"\n TABLE_USD = f\"{local_path}/asset/Collected_EastRural_Table/EastRural_Table.usd\"\n SPOON_SMALL_USD = f\"{local_path}/asset/Collected_Spoon_Small/Spoon_Small.usd\"\n SPOON_BIG_USD = f\"{local_path}/asset/Collected_Spoon_Big/Spoon_Big.usd\"\n FORK_SMALL_USD = f\"{local_path}/asset/Collected_Fork_Small/Fork_Small.usd\"\n FORK_BIG_USD = f\"{local_path}/asset/Collected_Fork_Big/Fork_Big.usd\"\n KNIFE_USD = f\"{local_path}/asset/Collected_Knife/Knife.usd\"\n\n # Camera paramters\n cam_position = (46, 200, 25)\n cam_position2 = (46, 120, 25)\n cam_position_random = rep.distribution.uniform((0, 181, 0), (0, 300, 0))\n cam_rotation = (-90, 0, 0)\n focus_distance = 114\n focus_distance2 = 39.1\n focal_length = 27\n focal_length2 = 18.5\n f_stop = 1.8\n f_stop2 = 1.8\n focus_distance_random = rep.distribution.normal(500.0, 100)\n\n # Cultery path\n current_cultery = SPOON_SMALL_USD # Change the item here e.g KNIFE_USD\n output_path = current_cultery.split(\".\")[0].split(\"/\")[-1]\n\n def rect_lights(num=1):\n lights = rep.create.light(\n light_type=\"rect\",\n temperature=rep.distribution.normal(6500, 500),\n intensity=rep.distribution.normal(0, 
5000),\n position=(45, 110, 0),\n rotation=(-90, 0, 0),\n scale=rep.distribution.uniform(50, 100),\n count=num\n )\n return lights.node\n\n def dome_lights(num=3):\n lights = rep.create.light(\n light_type=\"dome\",\n temperature=rep.distribution.normal(6500, 500),\n intensity=rep.distribution.normal(0, 1000),\n position=(45, 120, 18),\n rotation=(225, 0, 0),\n count=num\n )\n return lights.node\n\n def table():\n table = rep.create.from_usd(TABLE_USD, semantics=[('class', 'table')])\n\n with table:\n rep.modify.pose(\n position=(46, -0.0, 20),\n rotation=(0, -90, -90),\n )\n return table\n\n # Define randomizer function for CULTERY assets. This randomization includes placement and rotation of the assets on the surface.\n def cutlery_props(size=15):\n instances = rep.randomizer.instantiate(rep.utils.get_usd_files(\n current_cultery), size=size, mode='point_instance')\n\n with instances:\n rep.modify.pose(\n position=rep.distribution.uniform(\n (0, 86.3651, 0), (90, 86.3651, 42)),\n rotation=rep.distribution.uniform(\n (-90, -180, -90), (90, 180, 90)),\n )\n return instances.node\n\n # Register randomization\n rep.randomizer.register(table)\n rep.randomizer.register(cutlery_props)\n rep.randomizer.register(rect_lights)\n rep.randomizer.register(dome_lights)\n\n # Multiple setup cameras and attach it to render products\n camera = rep.create.camera(focus_distance=focus_distance, focal_length=focal_length,\n position=cam_position, rotation=cam_rotation, f_stop=f_stop)\n camera2 = rep.create.camera(focus_distance=focus_distance2, focal_length=focal_length2,\n position=cam_position2, rotation=cam_rotation, f_stop=f_stop)\n\n # Will render 1024x1024 images and 512x512 images\n render_product = rep.create.render_product(camera, (1024, 1024))\n render_product2 = rep.create.render_product(camera2, (512, 512))\n\n # Initialize and attach writer\n writer = rep.WriterRegistry.get(\"BasicWriter\")\n writer.initialize(output_dir=f\"{local_path}/data/random/{output_path}\",\n rgb=True, bounding_box_2d_tight=False, semantic_segmentation=False)\n writer.attach([render_product, render_product2])\n\n with rep.trigger.on_frame(num_frames=25):\n rep.randomizer.table()\n rep.randomizer.rect_lights(1)\n rep.randomizer.dome_lights(1)\n rep.randomizer.cutlery_props(5)\n\n # Run the simulation graph\n rep.orchestrator.run()\n"},"size":{"kind":"number","value":4172,"string":"4,172"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":37.63888854038066,"string":"37.638889"},"max_line_length":{"kind":"number","value":133,"string":"133"},"alphanum_fraction":{"kind":"number","value":0.6220038349419933,"string":"0.622004"}}},{"rowIdx":9488,"cells":{"file_path":{"kind":"string","value":"gigwegbe/synthetic_data_with_nvidia_replicator_and_edge_impulse/objects_position_angled_60.py"},"content":{"kind":"string","value":"import omni.replicator.core as rep\n\nwith rep.new_layer():\n\n # Load in asset\n local_path = \"/home/george/Documents/synthetic_data_with_nvidia_replicator_and_edge_impulse/\"\n TABLE_USD = f\"{local_path}/asset/Collected_EastRural_Table/EastRural_Table.usd\"\n SPOON_SMALL_USD = f\"{local_path}/asset/Collected_Spoon_Small/Spoon_Small.usd\"\n SPOON_BIG_USD = f\"{local_path}/asset/Collected_Spoon_Big/Spoon_Big.usd\"\n FORK_SMALL_USD = f\"{local_path}/asset/Collected_Fork_Small/Fork_Small.usd\"\n FORK_BIG_USD = f\"{local_path}/asset/Collected_Fork_Big/Fork_Big.usd\"\n KNIFE_USD = f\"{local_path}/asset/Collected_Knife/Knife.usd\"\n\n # Camera paramters\n cam_position = (46, 200, 
78)\n cam_position2 = (46, 120, 25)\n cam_position_random = rep.distribution.uniform((0, 181, 0), (0, 300, 0))\n cam_rotation = (-60, 0, 0)\n focus_distance = 114\n focus_distance2 = 39.1\n focal_length = 27\n focal_length2 = 18.5\n f_stop = 1.8\n f_stop2 = 1.8\n focus_distance_random = rep.distribution.normal(500.0, 100)\n\n # Cultery path\n current_cultery = KNIFE_USD # Change the item here e.g KNIFE_USD\n output_path = current_cultery.split(\".\")[0].split(\"/\")[-1]\n\n def rect_lights(num=1):\n lights = rep.create.light(\n light_type=\"rect\",\n temperature=rep.distribution.normal(6500, 500),\n intensity=rep.distribution.normal(0, 5000),\n position=(45, 110, 0),\n rotation=(-90, 0, 0),\n scale=rep.distribution.uniform(50, 100),\n count=num\n )\n return lights.node\n\n def dome_lights(num=3):\n lights = rep.create.light(\n light_type=\"dome\",\n temperature=rep.distribution.normal(6500, 500),\n intensity=rep.distribution.normal(0, 1000),\n position=(45, 120, 18),\n rotation=(225, 0, 0),\n count=num\n )\n return lights.node\n\n def table():\n table = rep.create.from_usd(TABLE_USD, semantics=[('class', 'table')])\n\n with table:\n rep.modify.pose(\n position=(46, -0.0, 20),\n rotation=(0, -90, -90),\n )\n return table\n\n # Define randomizer function for CULTERY assets. This randomization includes placement and rotation of the assets on the surface.\n def cutlery_props(size=15):\n instances = rep.randomizer.instantiate(rep.utils.get_usd_files(\n current_cultery), size=size, mode='point_instance')\n\n with instances:\n rep.modify.pose(\n position=rep.distribution.uniform(\n (0, 76.3651, 0), (90, 76.3651, 42)),\n rotation=rep.distribution.uniform(\n (-90, -180, 0), (-90, 180, 0)),\n )\n return instances.node\n\n # Register randomization\n rep.randomizer.register(table)\n rep.randomizer.register(cutlery_props)\n rep.randomizer.register(rect_lights)\n rep.randomizer.register(dome_lights)\n\n # Multiple setup cameras and attach it to render products\n camera = rep.create.camera(focus_distance=focus_distance, focal_length=focal_length,\n position=cam_position, rotation=cam_rotation, f_stop=f_stop)\n camera2 = rep.create.camera(focus_distance=focus_distance2, focal_length=focal_length2,\n position=cam_position2, rotation=cam_rotation, f_stop=f_stop)\n\n # Will render 1024x1024 images and 512x512 images\n render_product = rep.create.render_product(camera, (1024, 1024))\n render_product2 = rep.create.render_product(camera2, (512, 512))\n\n # Initialize and attach writer\n writer = rep.WriterRegistry.get(\"BasicWriter\")\n writer.initialize(output_dir=f\"{local_path}/data/angled_60/{output_path}\",\n rgb=True, bounding_box_2d_tight=False, semantic_segmentation=False)\n writer.attach([render_product, render_product2])\n\n with rep.trigger.on_frame(num_frames=25):\n rep.randomizer.table()\n rep.randomizer.rect_lights(1)\n rep.randomizer.dome_lights(1)\n rep.randomizer.cutlery_props(5)\n\n # Run the simulation graph\n rep.orchestrator.run()\n"},"size":{"kind":"number","value":4165,"string":"4,165"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":37.574073726165985,"string":"37.574074"},"max_line_length":{"kind":"number","value":133,"string":"133"},"alphanum_fraction":{"kind":"number","value":0.6218487393464949,"string":"0.621849"}}},{"rowIdx":9489,"cells":{"file_path":{"kind":"string","value":"gigwegbe/synthetic_data_with_nvidia_replicator_and_edge_impulse/README.md"},"content":{"kind":"string","value":"---\ndescription: Come learn how to generate photorealistic 
images in Nvidia Replicator and build object detection model using Edge Impulse.\n---\n\n# The Unreasonable Effectiveness of Synthetic Data\n\nCreated By:\n[George Igwegbe](https://www.linkedin.com/in/george-igwegbe/)\n\nPublic Project Link:\n[GitHub](https://github.com/gigwegbe/synthetic_data_with_nvidia_replicator_and_edge_impulse) | [Edge Impulse](https://studio.edgeimpulse.com/public/187851/latest)\n\n\n\n\n## Introduction\nBuilding an object detection model can be tricky since it requires a large dataset. Sometimes, data can be few or not diverse enough to train a robust model. Synthetic data offers an alternative to generating well-represented datasets to build a quality model. By applying domain randomization, we developed photorealistic datasets, trained a neural network, and validated the model using real datasets. To create a diverse dataset, we created a variety of simulated environments with randomized properties: changing lighting conditions, camera position, and material textures. We also show that synthetic, randomized datasets can help generalize a model to adapt to the real-world environment.\n\n## Story\nWe wanted to replicate the [object detection](https://www.youtube.com/watch?v=Vwv0PJPeC4s) work by Louis Moreau, but this time using synthetic data rather than real data. The project aims to demonstrate how to build and deploy the Edge Impulse object detection model using synthetic datasets generated by Nvidia Omniverse Replicator. The Replicator is an Nvidia Omniverse extension that provides means of generating physically accurate synthetic data.\n\n\n## Why Synthetic Data?\n\nComputer vision tasks such as classification, object detection, and segmentation require a large-scale dataset. Data collected from some real-world applications tend to be narrow and less diverse, often collected from a single environment, and sometimes is unchanged and stays the same for the most time. In addition, data collected from a single field tend to have fewer examples of tail-end scenarios and rare events, and we cannot easily replicate these situations in the real world.\nAndrej Karpathy's presentation - (source: Tesla AI Day, 2021) |\n--- | \n |\n\n\n\nConsequently, models trained in a single domain are brittle and often fail when deployed in another environment; thus, it requires another training cycle to adapt to the new environment. It raises the question, how can we efficiently and cheaply collect generalized data across several domains? A simple unreasonable effective solution is Domain Randomization, which varies the texture and colour of the foreground object, the background image, the number of lights in the scene, the pose of the lights, and the camera position etc. Domain randomization can further improve the variability in the texture of synthetic data of rare events generated in the simulator.\n\n> The purpose of domain randomization is to provide enough simulated variability at training time such that at test time the model is able to generalize to real-world data.” - Tobin et al, Domain Randomization for Transferring Deep Neural Networks from Simulation to the Real World, 2017\n\nDomain Randomization for Transferring Deep Neural Networks - source: Tobin et al, 2017) |\n--- | \n |\n\n\n\nNvidia Replicator enables us to perform Domain Randomization. The Replicator is one module within the Omniverse family, and it offers tools and workflow to generate data for various computer vision and non-visual tasks. 
The Replicator is a highly interoperable tool that integrates with over 40+ modelling/rendering applications across different verticals. The seamless integration is possible thanks to Pixar's Universal Scene Description(USD), which serves as a protocol for various applications such as Blender, 3DMax, Maya, Revit, C4D etc., to work with the Nvidia Replicator.\n\n## Data-Centric Workflow\nTraditional machine learning workflow is often model-centric, focusing more on the model's development by iteratively improving the algorithm design, etc. In this project, we chose the Data-centric approach, where we fixed the model and iteratively improved the quality of the generated dataset. This approach is more robust since we know our model is as good as the dataset. This method hence systematically changes the dataset performance on an AI task. At its core, it is thinking about ML in terms of data, not the model.\n\nData generation and model building workflow |\n--- | \n |\n\n\n\n## Requirements\n- Nvidia Omniverse Replicator \n- Edge Impulse Studio\n- Logitech Webcam HD Pro - C920\n\n### Hardware and Driver Setup\n\nNvidia Omniverse Replicator is a computation-intensive application requiring a moderate-size GPU and decent RAM. My hardware setup consists of 32GB RAM, 1TB storage space and 8GB GPU with an Intel i9 processor.\n\nHardware Specification | Hardware Specification\n--- | ---\n | \n\n\nThe application can run on both Windows and Linux operating systems. For this experiment, we used Ubuntu 20.04 LTS distro, given Ubuntu 18.04 is no longer supported by Nvidia Omniverse as of November 2022. In addition, we selected the appropriate Nvidia driver, v510.108.03 and installed it on a Linux machine.\n\nSoftware Specification | Software Specification\n--- | ---\n | \n\n## Experiment Setup and Data Generation\nThe environment for the experiment consists of movable and immovable objects (dynamic and static positioning objects). The immovable object consists of Lights, a Table and two Cameras. At the same time, the movable objects are the cutlery which is a spoon, fork and knife. We will use domain randomization to alter the properties of some of the movable and immovable objects. Assets which include objects and scenes are represented in the Replicator as USD.\n\nExperimental Setup |\n--- | \n |\n\nEvery object in Omniverse Replicator is represented as USD. A 3D model file with varying extensions such as obj, fbx, and glif can be imported into the Replicator using Nvidia Omniverse's CAD Importer extension. The extension converts the 3D files into USD. We imported our assets (Table, knife, spoon and fork) into the simulator by specifying the path of the assets.\nRectangular Light | Dome Light\n--- | --- |\n | \n\nLightning plays a crucial role in data generation. There are different built-in lighting types in the Nvidia replicator. We choose two rectangular lights and a dome light since they give us better lighting options and capabilities for generating photorealistic images. The rectangular light emulates light generated from a panel, and the dome light lets you dynamically lighten the entire scene. We randomized some light parameters such as temperature and intensity, and both parameters were sampled from a normal distribution. 
In addition, the scale parameter was sampled from a uniform distribution while keeping the rotation and position of the lights fixed.\n```python\n# Lighting setup for Rectangular light and Dome light \n\ndef rect_lights(num=2):\n    lights = rep.create.light(\n        light_type="rect",\n        temperature=rep.distribution.normal(6500, 500),\n        intensity=rep.distribution.normal(0, 5000),\n        position=(-131,150,-134),\n        rotation=(-90,0,0),\n        scale=rep.distribution.uniform(50, 100),\n        count=num\n    )\n    return lights.node\n\ndef dome_lights(num=1):\n    lights = rep.create.light(\n        light_type="dome",\n        temperature=rep.distribution.normal(6500, 500),\n        intensity=rep.distribution.normal(0, 1000),\n        position=(0,0,0),\n        rotation=(270,0,0),\n        count=num\n    )\n    return lights.node\n```\nWe fixed the position and rotation, selected the tabletop materials, chose an additional Mahogany material, and alternated the material in the data generation process.\n```python \n# Import and position the table object\n\ndef table():\n    table = rep.create.from_usd(TABLE_USD, semantics=[('class', 'table')])\n\n    with table:\n        rep.modify.pose(\n            position=(-135.39745, 0, -140.25696),\n            rotation=(0,-90,-90),\n        )\n    return table\n```\nTo improve our dataset's quality further, we chose two cameras of different resolutions, which we strategically positioned in various locations within the scene. In addition, we varied the position of the cameras in a different version of the data generation process.\n```python\n# Set up multiple cameras and attach them to render products\ncamera = rep.create.camera(focus_distance=focus_distance, focal_length=focal_length, position=cam_position, rotation=cam_rotation, f_stop=f_stop)\ncamera2 = rep.create.camera(focus_distance=focus_distance2, focal_length=focal_length2, position=cam_position2, rotation=cam_rotation, f_stop=f_stop)\n\n# Will render 1024x1024 images and 512x512 images\nrender_product = rep.create.render_product(camera, (1024, 1024))\nrender_product2 = rep.create.render_product(camera2, (512, 512))\n```\nFinally, for the movable objects, which include a knife, spoon and fork, we ensured that these objects could only translate within the bounds of the table. So we chose a bounding position where the objects were expected to translate and rotate with the table. We sampled position and rotation from a uniform distribution while keeping the number of movable objects generated at each iteration at five.\n```python \n# Define randomizer function for CUTLERY assets.\ndef cutlery_props(size=5):\n    instances = rep.randomizer.instantiate(rep.utils.get_usd_files(current_cultery), size=size, mode='point_instance')\n\n    with instances:\n        rep.modify.pose(\n            position=rep.distribution.uniform((-212, 76.2, -187), (-62, 76.2, -94)),\n            rotation=rep.distribution.uniform((-90,-180, 0), (-90, 180, 0)),\n        )\n    return instances.node\n\n```\nAt this juncture, we have instantiated all objects in our scene. 
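Note that the calls made inside the frame trigger below (`rep.randomizer.table()`, `rep.randomizer.cutlery_props()`, and so on) only become available once the corresponding functions have been registered with the Replicator. The full generation scripts in this repository perform that registration explicitly; a minimal sketch of the same step is shown here for completeness:\n\n```python\n# Register the randomizer functions defined above so they can be invoked\n# as rep.randomizer.<name>() from inside the trigger scope\nrep.randomizer.register(table)\nrep.randomizer.register(cutlery_props)\nrep.randomizer.register(rect_lights)\nrep.randomizer.register(dome_lights)\n```\n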
We can now run the randomizer to generate 50 images at each synthetic generation cycle.\n```python\n# Register randomization\nwith rep.trigger.on_frame(num_frames=50):\n rep.randomizer.table()\n rep.randomizer.rect_lights(1)\n rep.randomizer.dome_lights(1)\n rep.randomizer.cutlery_props(5)\n\n# Run the simulation graph\nrep.orchestrator.run()\n```\nTo ensure we generated photorealistic images, we switched to RTXinteractive(Path Tracing) mode, which gave high-fidelity renderings.\n\nData generation process |\n--- |\n |\n\n\n## Data Distribution and Model Building\n\nData Distribution of different items |\n--- | \n |\nFollowing the data-centric philosophy, We generated three versions of the dataset. The first version, V1, consists of generated images normal to the camera position, and V2 represents images generated at an angle of 60 degrees to the camera position with a mahogany table top. V3 comprises images normal to the camera position while the cutlery were suspended in space.\n\nV1 - Normal to the object |\n--- | \n |\n\n
\n
\n
V2 - Angled to the object
\n
V3 - Normal to the object and object suspended in space
\n
\n
\n
\n
\n
\n
\n\n\n
\n
\n
Generated Dataset - V2
\n
Generated Dataset - V3
\n
\n
\n
\n
\n
\n
\n\n\n## Edge Impulse: Data Annotation and Model Building\n\n
\n
\n
Data Labeler
\n
Data Annotation
\n
\n
\n
\n
\n
\n
\n\n\nWe uploaded the generated images to Edge Impulse Studio, where we annotated the dataset into different classes. We carefully annotated each dataset version and trained using the Yolov5 object detection model. We tried a couple of input sizes ranging from 320, 512 and 1024 pixels before settling with 320. Edge Impulse provided an excellent version control system for models, which enabled us to track model performance across different dataset versions and hyperparameters.\n\n
\n
\n
Create Impulse
\n
Generate Feature
\n
\n
\n
\n
\n
\n
\n\nVersion Control in Edge Impulse |\n--- |\n |\n\n\n\n### Testing of Object Detection Models with Real Objects\n\nWe used the Edge Impulse CLI tool to evaluate the model's accuracy by downloading, building and running the model locally. A Logitech C920 webcam streamed the live video of objects on a table from 50 cm to 80 cm from the camera. The position of the camera remains fixed during the experiment. The clips below show that the trained model does not generalize well to real-world objects. Thus we needed to improve the model by uploading, annotating and training the model with the V2 dataset.\n\nV1 failure - model failed to identify objects |\n--- | \n |\n\n\nWe observed improved model performance when trained with the V2 dataset. The model could identify various objects distinctly, although the model failed when we changed the objects' orientations. Thus, we trained the model with the remaining V3 dataset to mitigate these issues and increase other hyperparameters, such as epochs from 500 to 2000. We also tested the performance of our object detector on real objects with different background textures, and the model performed well in these conditions.\nV2 success - model can identify objects |\n--- |\n |\n\nV2 failure - model failed to identify objects in different orientations |\n--- |\n |\n\n\nAfter several cycles of iterating over various hyperparameters, we got a model that generalizes well across different orientations.\nV3 success - model can identify objects in different orientations |\n--- | \n |\n\nV3 success - model can identify different materials |\n--- | \n |\n\n\nThe core idea behind the data-centric approach to solving ML problems is to create more data around the failure points of the model. We improved the model by iteratively improving the data generation, especially in areas where the model had previously failed.\n\n\n\n## Conclusion\n\nIn this work, we learned how the domain randomization approach helps generate quality and well-generalized datasets for the object detection task. We also demonstrated the effectiveness of data-centric machine learning workflow in improving the model performance. 
Although this work is restricted to visual problems, we can extend domain randomization to other sensors such as lidar, accelerometer, and ultrasonic sensors.\n\n\n## Reference\n- [Project on Edge Impulse](https://studio.edgeimpulse.com/public/187851/latest)\n- [Introduction to Replicator](https://docs.omniverse.nvidia.com/app_code/prod_extensions/ext_replicator.html)\n- [Introduction to USD](https://developer.nvidia.com/usd#usdnvidia)\n- [Telsa AI Day](https://youtu.be/j0z4FweCy4M?t=5727)\n- [Domain Randomization for Transferring Deep Neural Networks](https://arxiv.org/pdf/1703.06907.pdf)\n- [Understanding Domain Randomization for SIM-TO-REAL Transfer](https://arxiv.org/pdf/2110.03239.pdf)\n\n"},"size":{"kind":"number","value":16016,"string":"16,016"},"lang":{"kind":"string","value":"Markdown"},"avg_line_length":{"kind":"number","value":55.59717294841988,"string":"55.597173"},"max_line_length":{"kind":"number","value":695,"string":"695"},"alphanum_fraction":{"kind":"number","value":0.7639860139383126,"string":"0.763986"}}},{"rowIdx":9490,"cells":{"file_path":{"kind":"string","value":"gigwegbe/synthetic_data_with_nvidia_replicator_and_edge_impulse/old_setting/README_old.md"},"content":{"kind":"string","value":"\n### Synthetic data with Nvidia replicator and Edge Impulse \n\n\n\n- Fixed position \n- Fixed Camera but not random \n- Fixed Lightning and light parameters \n- Changed background materials"},"size":{"kind":"number","value":228,"string":"228"},"lang":{"kind":"string","value":"Markdown"},"avg_line_length":{"kind":"number","value":24.444441728395365,"string":"24.444442"},"max_line_length":{"kind":"number","value":60,"string":"60"},"alphanum_fraction":{"kind":"number","value":0.7675438562827024,"string":"0.767544"}}},{"rowIdx":9491,"cells":{"file_path":{"kind":"string","value":"gigwegbe/synthetic_data_with_nvidia_replicator_and_edge_impulse/old_setting/objects_position_normal_90.py"},"content":{"kind":"string","value":"import omni.replicator.core as rep\n\nwith rep.new_layer():\n\n # Load in asset\n local_path = \"/home/george/Documents/synthetic_data_with_nvidia_replicator_and_edge_impulse/\"\n TABLE_USD =f\"{local_path}/asset/Collected_EastRural_Table/EastRural_Table.usd\"\n SPOON_SMALL_USD = f\"{local_path}/asset/Collected_Spoon_Small/Spoon_Small.usd\"\n SPOON_BIG_USD = f\"{local_path}/asset/Collected_Spoon_Big/Spoon_Big.usd\"\n FORK_SMALL_USD = f\"{local_path}/asset/Collected_Fork_Small/Fork_Small.usd\"\n FORK_BIG_USD = f\"{local_path}/asset/Collected_Fork_Big/Fork_Big.usd\"\n KNIFE_USD = f\"{local_path}/asset/Collected_Knife/Knife.usd\"\n\n\n # Camera paramters\n cam_position = (-131,200,-134)\n cam_position2 = (-131,120,-134)\n cam_position_random = rep.distribution.uniform((0,181,0), (0, 300, 0))\n cam_rotation = (-90,0,0) #(-45,0,0)\n focus_distance = 120\n focus_distance2 = 72\n focal_length = 19.1\n focal_length2 = 7.5\n f_stop = 1.8\n f_stop2 = 1.8\n focus_distance_random = rep.distribution.normal(500.0, 100)\n\n # Cultery path \n current_cultery = SPOON_SMALL_USD # Change the item here e.g KNIFE_USD\n output_path = current_cultery.split(\".\")[0].split(\"/\")[-1]\n\n def rect_lights(num=2):\n lights = rep.create.light(\n light_type=\"rect\",\n temperature=rep.distribution.normal(6500, 500),\n intensity=rep.distribution.normal(0, 5000),\n position=(-131,150,-134),\n rotation=(-90,0,0),\n scale=rep.distribution.uniform(50, 100),\n count=num\n )\n return lights.node \n \n def dome_lights(num=1):\n lights = rep.create.light(\n light_type=\"dome\",\n 
temperature=rep.distribution.normal(6500, 500),\n intensity=rep.distribution.normal(0, 1000),\n position=(0,0,0),\n rotation=(270,0,0),\n count=num\n )\n return lights.node \n\n def table():\n table = rep.create.from_usd(TABLE_USD, semantics=[('class', 'table')])\n\n with table:\n rep.modify.pose(\n position=(-135.39745, 0, -140.25696),\n rotation=(0,-90,-90),\n )\n return table \n \n # Define randomizer function for CULTERY assets. This randomization includes placement and rotation of the assets on the surface.\n def cutlery_props(size=15):\n instances = rep.randomizer.instantiate(rep.utils.get_usd_files(current_cultery), size=size, mode='point_instance')\n\n with instances:\n rep.modify.pose(\n position=rep.distribution.uniform((-212, 76.2, -187), (-62, 76.2, -94)),\n rotation=rep.distribution.uniform((-90,-180, 0), (-90, 180, 0)),\n )\n return instances.node\n\n # Register randomization \n rep.randomizer.register(table)\n rep.randomizer.register(cutlery_props)\n rep.randomizer.register(rect_lights)\n rep.randomizer.register(dome_lights)\n\n # Multiple setup cameras and attach it to render products\n camera = rep.create.camera(focus_distance=focus_distance, focal_length=focal_length, position=cam_position, rotation=cam_rotation, f_stop=f_stop)\n camera2 = rep.create.camera(focus_distance=focus_distance2, focal_length=focal_length2, position=cam_position2, rotation=cam_rotation, f_stop=f_stop)\n \n # Will render 1024x1024 images and 512x512 images\n render_product = rep.create.render_product(camera, (1024, 1024))\n render_product2 = rep.create.render_product(camera2, (512, 512))\n\n # Initialize and attach writer\n writer = rep.WriterRegistry.get(\"BasicWriter\")\n writer.initialize(output_dir=f\"{local_path}/data/normal/{output_path}\", rgb=True, bounding_box_2d_tight=False, semantic_segmentation=False)\n writer.attach([render_product, render_product2])\n\n with rep.trigger.on_frame(num_frames=50):\n rep.randomizer.table()\n rep.randomizer.rect_lights(1)\n rep.randomizer.dome_lights(1)\n rep.randomizer.cutlery_props(15)\n\n # Run the simulation graph\n rep.orchestrator.run()\n\n"},"size":{"kind":"number","value":4065,"string":"4,065"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":38.09615347984468,"string":"38.096153"},"max_line_length":{"kind":"number","value":153,"string":"153"},"alphanum_fraction":{"kind":"number","value":0.6418204180462926,"string":"0.64182"}}},{"rowIdx":9492,"cells":{"file_path":{"kind":"string","value":"gigwegbe/synthetic_data_with_nvidia_replicator_and_edge_impulse/old_setting/objects_position_random.py"},"content":{"kind":"string","value":"import omni.replicator.core as rep\n\nwith rep.new_layer():\n \n # Load in asset\n local_path = \"/home/george/Documents/synthetic_data_with_nvidia_replicator_and_edge_impulse/\"\n TABLE_USD =f\"{local_path}/asset/Collected_EastRural_Table/EastRural_Table.usd\"\n SPOON_SMALL_USD = f\"{local_path}/asset/Collected_Spoon_Small/Spoon_Small.usd\"\n SPOON_BIG_USD = f\"{local_path}/asset/Collected_Spoon_Big/Spoon_Big.usd\"\n FORK_SMALL_USD = f\"{local_path}/asset/Collected_Fork_Small/Fork_Small.usd\"\n FORK_BIG_USD = f\"{local_path}/asset/Collected_Fork_Big/Fork_Big.usd\"\n KNIFE_USD = f\"{local_path}/asset/Collected_Knife/Knife.usd\"\n\n # Camera paramters\n cam_position = (-131,200,-134)\n cam_position2 = (-131,120,-134)\n cam_position_random = rep.distribution.uniform((0,181,0), (0, 300, 0))\n cam_rotation = (-90,0,0) #(-45,0,0)\n focus_distance = 120\n focus_distance2 = 72\n focal_length = 
19.1\n focal_length2 = 7.5\n f_stop = 1.8\n f_stop2 = 1.8\n focus_distance_random = rep.distribution.normal(500.0, 100)\n\n # Cultery path \n current_cultery = SPOON_SMALL_USD # Change the item here e.g KNIFE_USD\n output_path = current_cultery.split(\".\")[0].split(\"/\")[-1]\n\n def rect_lights(num=2):\n lights = rep.create.light(\n light_type=\"rect\",\n temperature=rep.distribution.normal(6500, 500),\n intensity=rep.distribution.normal(0, 5000),\n position=(-131,150,-134),\n rotation=(-90,0,0),\n scale=rep.distribution.uniform(50, 100),\n count=num\n )\n return lights.node \n \n def dome_lights(num=1):\n lights = rep.create.light(\n light_type=\"dome\",\n temperature=rep.distribution.normal(6500, 500),\n intensity=rep.distribution.normal(0, 1000),\n position=(0,0,0),\n rotation=(270,0,0),\n count=num\n )\n return lights.node \n\n def table():\n table = rep.create.from_usd(TABLE_USD, semantics=[('class', 'table')])\n\n with table:\n rep.modify.pose(\n position=(-135.39745, 0, -140.25696),\n rotation=(0,-90,-90),\n )\n return table \n \n # Define randomizer function for CULTERY assets. This randomization includes placement and rotation of the assets on the surface.\n def cutlery_props(size=15):\n instances = rep.randomizer.instantiate(rep.utils.get_usd_files(current_cultery), size=size, mode='point_instance')\n \n with instances:\n rep.modify.pose(\n position=rep.distribution.uniform((-212, 86.2, -187), (-62, 86.2, -94)),\n rotation=rep.distribution.uniform((-90,-180, -90), (90, 180, 90)),\n )\n return instances.node\n\n # Register randomization \n rep.randomizer.register(table)\n rep.randomizer.register(cutlery_props)\n rep.randomizer.register(rect_lights)\n rep.randomizer.register(dome_lights)\n\n # Multiple setup cameras and attach it to render products\n camera = rep.create.camera(focus_distance=focus_distance, focal_length=focal_length, position=cam_position, rotation=cam_rotation, f_stop=f_stop)\n camera2 = rep.create.camera(focus_distance=focus_distance2, focal_length=focal_length2, position=cam_position2, rotation=cam_rotation, f_stop=f_stop)\n\n # Will render 1024x1024 images and 512x512 images\n render_product = rep.create.render_product(camera, (1024, 1024))\n render_product2 = rep.create.render_product(camera2, (512, 512))\n\n # Initialize and attach writer\n writer = rep.WriterRegistry.get(\"BasicWriter\")\n writer.initialize(output_dir=f\"{local_path}/data/random/{output_path}\", rgb=True, bounding_box_2d_tight=False, semantic_segmentation=False)\n writer.attach([render_product, render_product2])\n\n with rep.trigger.on_frame(num_frames=25):\n rep.randomizer.table()\n rep.randomizer.rect_lights(1)\n rep.randomizer.dome_lights(1)\n rep.randomizer.cutlery_props(15)\n\n # Run the simulation graph\n rep.orchestrator.run()\n\n"},"size":{"kind":"number","value":4072,"string":"4,072"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":38.54368894617778,"string":"38.543689"},"max_line_length":{"kind":"number","value":153,"string":"153"},"alphanum_fraction":{"kind":"number","value":0.6412082513160098,"string":"0.641208"}}},{"rowIdx":9493,"cells":{"file_path":{"kind":"string","value":"gigwegbe/synthetic_data_with_nvidia_replicator_and_edge_impulse/old_setting/objects_position_normal_60.py"},"content":{"kind":"string","value":"import omni.replicator.core as rep\n\nwith rep.new_layer():\n\n # Load in asset\n local_path = \"/home/george/Documents/synthetic_data_with_nvidia_replicator_and_edge_impulse/\"\n TABLE_USD 
=f\"{local_path}/asset/Collected_EastRural_Table/EastRural_Table.usd\"\n SPOON_SMALL_USD = f\"{local_path}/asset/Collected_Spoon_Small/Spoon_Small.usd\"\n SPOON_BIG_USD = f\"{local_path}/asset/Collected_Spoon_Big/Spoon_Big.usd\"\n FORK_SMALL_USD = f\"{local_path}/asset/Collected_Fork_Small/Fork_Small.usd\"\n FORK_BIG_USD = f\"{local_path}/asset/Collected_Fork_Big/Fork_Big.usd\"\n KNIFE_USD = f\"{local_path}/asset/Collected_Knife/Knife.usd\"\n\n\n # Camera paramters\n cam_position = (-131,200,-134)\n cam_position2 = (-131,120,-134)\n cam_position_random = rep.distribution.uniform((0,181,0), (0, 300, 0))\n cam_rotation = (-60,0,0) #(-45,0,0)\n focus_distance = 120\n focus_distance2 = 72\n focal_length = 19.1\n focal_length2 = 7.5\n f_stop = 1.8\n f_stop2 = 1.8\n focus_distance_random = rep.distribution.normal(500.0, 100)\n\n # Cultery path \n current_cultery = SPOON_SMALL_USD # Change the item here e.g KNIFE_USD\n output_path = current_cultery.split(\".\")[0].split(\"/\")[-1]\n\n def rect_lights(num=2):\n lights = rep.create.light(\n light_type=\"rect\",\n temperature=rep.distribution.normal(6500, 500),\n intensity=rep.distribution.normal(0, 5000),\n position=(-131,150,-134),\n rotation=(-90,0,0),\n scale=rep.distribution.uniform(50, 100),\n count=num\n )\n return lights.node \n \n def dome_lights(num=1):\n lights = rep.create.light(\n light_type=\"dome\",\n temperature=rep.distribution.normal(6500, 500),\n intensity=rep.distribution.normal(0, 1000),\n position=(0,0,0),\n rotation=(270,0,0),\n count=num\n )\n return lights.node \n\n def table():\n table = rep.create.from_usd(TABLE_USD, semantics=[('class', 'table')])\n\n with table:\n rep.modify.pose(\n position=(-135.39745, 0, -140.25696),\n rotation=(0,-90,-90),\n )\n return table \n \n # Define randomizer function for CULTERY assets. 
This randomization includes placement and rotation of the assets on the surface.\n def cutlery_props(size=15):\n instances = rep.randomizer.instantiate(rep.utils.get_usd_files(current_cultery), size=size, mode='point_instance')\n\n with instances:\n rep.modify.pose(\n position=rep.distribution.uniform((-212, 76.2, -187), (-62, 76.2, -94)),\n rotation=rep.distribution.uniform((-90,-180, 0), (-90, 180, 0)),\n )\n return instances.node\n\n # Register randomization \n rep.randomizer.register(table)\n rep.randomizer.register(cutlery_props)\n rep.randomizer.register(rect_lights)\n rep.randomizer.register(dome_lights)\n\n # Multiple setup cameras and attach it to render products\n camera = rep.create.camera(focus_distance=focus_distance, focal_length=focal_length, position=cam_position, rotation=cam_rotation, f_stop=f_stop)\n camera2 = rep.create.camera(focus_distance=focus_distance2, focal_length=focal_length2, position=cam_position2, rotation=cam_rotation, f_stop=f_stop)\n \n # Will render 1024x1024 images and 512x512 images\n render_product = rep.create.render_product(camera, (1024, 1024))\n render_product2 = rep.create.render_product(camera2, (512, 512))\n\n # Initialize and attach writer\n writer = rep.WriterRegistry.get(\"BasicWriter\")\n writer.initialize(output_dir=f\"{local_path}/data/normal_60/{output_path}\", rgb=True, bounding_box_2d_tight=False, semantic_segmentation=False)\n writer.attach([render_product, render_product2])\n\n with rep.trigger.on_frame(num_frames=50):\n rep.randomizer.table()\n rep.randomizer.rect_lights(1)\n rep.randomizer.dome_lights(1)\n rep.randomizer.cutlery_props(15)\n\n # Run the simulation graph\n rep.orchestrator.run()\n\n"},"size":{"kind":"number","value":4068,"string":"4,068"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":38.124999633413466,"string":"38.125"},"max_line_length":{"kind":"number","value":153,"string":"153"},"alphanum_fraction":{"kind":"number","value":0.641838741238486,"string":"0.641839"}}},{"rowIdx":9494,"cells":{"file_path":{"kind":"string","value":"mati-nvidia/window-menu-add/README.md"},"content":{"kind":"string","value":"# Window Menu Add\n\nAn example extension showing how to create a window and add it to the `Window` menu so that it can be shown and hidden\nusing the menu item in the `Window` menu.\n\n## App Link Setup\n\nIf `app` folder link doesn't exist or broken it can be created again. For better developer experience it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from *Omniverse Launcher*. Convenience script to use is included.\n\nRun:\n\n```\n> link_app.bat\n```\n\nIf successful you should see `app` folder link in the root of this repo.\n\nIf multiple Omniverse apps is installed script will select recommended one. Or you can explicitly pass an app:\n\n```\n> link_app.bat --app create\n```\n\nYou can also just pass a path to create link to:\n\n```\n> link_app.bat --path \"C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4\"\n```\n\n\n## Sharing Your Extensions\n\nThis folder is ready to be pushed to any git repository. Once pushed direct link to a git repository can be added to *Omniverse Kit* extension search paths.\n\nLink might look like this: `git://github.com/[user]/[your_repo].git?branch=main&dir=exts`\n\nNotice `exts` is repo subfolder with extensions. 
More information can be found in \"Git URL as Extension Search Paths\" section of developers manual.\n\nTo add a link to your *Omniverse Kit* based app go into: Extension Manager -> Gear Icon -> Extension Search Path\n\n"},"size":{"kind":"number","value":1387,"string":"1,387"},"lang":{"kind":"string","value":"Markdown"},"avg_line_length":{"kind":"number","value":32.85365773527664,"string":"32.853658"},"max_line_length":{"kind":"number","value":258,"string":"258"},"alphanum_fraction":{"kind":"number","value":0.7476568127269958,"string":"0.747657"}}},{"rowIdx":9495,"cells":{"file_path":{"kind":"string","value":"mati-nvidia/window-menu-add/tools/scripts/link_app.py"},"content":{"kind":"string","value":"import os\nimport argparse\nimport sys\nimport json\nimport packmanapi\nimport urllib3\n\n\ndef find_omniverse_apps():\n http = urllib3.PoolManager()\n try:\n r = http.request(\"GET\", \"http://127.0.0.1:33480/components\")\n except Exception as e:\n print(f\"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\\nError: {e}\")\n sys.exit(1)\n\n apps = {}\n for x in json.loads(r.data.decode(\"utf-8\")):\n latest = x.get(\"installedVersions\", {}).get(\"latest\", \"\")\n if latest:\n for s in x.get(\"settings\", []):\n if s.get(\"version\", \"\") == latest:\n root = s.get(\"launch\", {}).get(\"root\", \"\")\n apps[x[\"slug\"]] = (x[\"name\"], root)\n break\n return apps\n\n\ndef create_link(src, dst):\n print(f\"Creating a link '{src}' -> '{dst}'\")\n packmanapi.link(src, dst)\n\n\nAPP_PRIORITIES = [\"code\", \"create\", \"view\"]\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"Create folder link to Kit App installed from Omniverse Launcher\")\n parser.add_argument(\n \"--path\",\n help=\"Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'\",\n required=False,\n )\n parser.add_argument(\n \"--app\", help=\"Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'\", required=False\n )\n args = parser.parse_args()\n\n path = args.path\n if not path:\n print(\"Path is not specified, looking for Omniverse Apps...\")\n apps = find_omniverse_apps()\n if len(apps) == 0:\n print(\n \"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers.\"\n )\n sys.exit(0)\n\n print(\"\\nFound following Omniverse Apps:\")\n for i, slug in enumerate(apps):\n name, root = apps[slug]\n print(f\"{i}: {name} ({slug}) at: '{root}'\")\n\n if args.app:\n selected_app = args.app.lower()\n if selected_app not in apps:\n choices = \", \".join(apps.keys())\n print(f\"Passed app: '{selected_app}' is not found. 
Specify one of the following found Apps: {choices}\")\n sys.exit(0)\n else:\n selected_app = next((x for x in APP_PRIORITIES if x in apps), None)\n if not selected_app:\n selected_app = next(iter(apps))\n\n print(f\"\\nSelected app: {selected_app}\")\n _, path = apps[selected_app]\n\n if not os.path.exists(path):\n print(f\"Provided path doesn't exist: {path}\")\n else:\n SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))\n create_link(f\"{SCRIPT_ROOT}/../../app\", path)\n print(\"Success!\")\n"},"size":{"kind":"number","value":2813,"string":"2,813"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":32.49999961309524,"string":"32.5"},"max_line_length":{"kind":"number","value":133,"string":"133"},"alphanum_fraction":{"kind":"number","value":0.5623889084385394,"string":"0.562389"}}},{"rowIdx":9496,"cells":{"file_path":{"kind":"string","value":"mati-nvidia/window-menu-add/tools/packman/config.packman.xml"},"content":{"kind":"string","value":"\n \n \n \n\n"},"size":{"kind":"number","value":211,"string":"211"},"lang":{"kind":"string","value":"XML"},"avg_line_length":{"kind":"number","value":34.33332761111206,"string":"34.333328"},"max_line_length":{"kind":"number","value":123,"string":"123"},"alphanum_fraction":{"kind":"number","value":0.691943124682734,"string":"0.691943"}}},{"rowIdx":9497,"cells":{"file_path":{"kind":"string","value":"mati-nvidia/window-menu-add/tools/packman/bootstrap/install_package.py"},"content":{"kind":"string","value":"# Copyright 2019 NVIDIA CORPORATION\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport zipfile\nimport tempfile\nimport sys\nimport shutil\n\n__author__ = \"hfannar\"\nlogging.basicConfig(level=logging.WARNING, format=\"%(message)s\")\nlogger = logging.getLogger(\"install_package\")\n\n\nclass TemporaryDirectory:\n def __init__(self):\n self.path = None\n\n def __enter__(self):\n self.path = tempfile.mkdtemp()\n return self.path\n\n def __exit__(self, type, value, traceback):\n # Remove temporary data created\n shutil.rmtree(self.path)\n\n\ndef install_package(package_src_path, package_dst_path):\n with zipfile.ZipFile(\n package_src_path, allowZip64=True\n ) as zip_file, TemporaryDirectory() as temp_dir:\n zip_file.extractall(temp_dir)\n # Recursively copy (temp_dir will be automatically cleaned up on exit)\n try:\n # Recursive copy is needed because both package name and version folder could be missing in\n # target directory:\n shutil.copytree(temp_dir, package_dst_path)\n except OSError as exc:\n logger.warning(\n \"Directory %s already present, packaged installation aborted\" % package_dst_path\n )\n else:\n logger.info(\"Package successfully installed to %s\" % package_dst_path)\n\n\ninstall_package(sys.argv[1], 
sys.argv[2])\n"},"size":{"kind":"number","value":1888,"string":"1,888"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":31.56896497294888,"string":"31.568965"},"max_line_length":{"kind":"number","value":103,"string":"103"},"alphanum_fraction":{"kind":"number","value":0.6869703386191894,"string":"0.68697"}}},{"rowIdx":9498,"cells":{"file_path":{"kind":"string","value":"mati-nvidia/window-menu-add/exts/maticodes.example.window.add/maticodes/example/window/add/extension.py"},"content":{"kind":"string","value":"import carb\nimport omni.ext\nimport omni.kit.ui\n\nfrom .window import MyCustomWindow, WINDOW_TITLE\n\n\nclass WindowMenuAddExtension(omni.ext.IExt):\n def on_startup(self, ext_id):\n carb.log_info(\"[maticodes.example.window.add] WindowMenuAddExtension startup\")\n\n # Note the \"Window\" part of the path that directs the new menu item to the \"Window\" menu.\n self._menu_path = f\"Window/{WINDOW_TITLE}\"\n self._window = None\n self._menu = omni.kit.ui.get_editor_menu().add_item(self._menu_path, self._on_menu_click, True)\n\n def on_shutdown(self):\n carb.log_info(\"[maticodes.example.window.add] WindowMenuAddExtension shutdown\")\n\n omni.kit.ui.get_editor_menu().remove_item(self._menu)\n if self._window is not None:\n self._window.destroy()\n self._window = None\n\n def _on_menu_click(self, menu, toggled):\n \"\"\"Handles showing and hiding the window from the 'Windows' menu.\"\"\"\n if toggled:\n if self._window is None:\n self._window = MyCustomWindow(WINDOW_TITLE, self._menu_path)\n else:\n self._window.show()\n else:\n if self._window is not None:\n self._window.hide()\n"},"size":{"kind":"number","value":1232,"string":"1,232"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":34.228570450612274,"string":"34.22857"},"max_line_length":{"kind":"number","value":103,"string":"103"},"alphanum_fraction":{"kind":"number","value":0.6217532462485769,"string":"0.621753"}}},{"rowIdx":9499,"cells":{"file_path":{"kind":"string","value":"mati-nvidia/window-menu-add/exts/maticodes.example.window.add/maticodes/example/window/add/__init__.py"},"content":{"kind":"string","value":"from .extension import *\n"},"size":{"kind":"number","value":25,"string":"25"},"lang":{"kind":"string","value":"Python"},"avg_line_length":{"kind":"number","value":11.999994000003,"string":"11.999994"},"max_line_length":{"kind":"number","value":24,"string":"24"},"alphanum_fraction":{"kind":"number","value":0.7599999696000012,"string":"0.76"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":94,"numItemsPerPage":100,"numTotalItems":9731,"offset":9400,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NzEzNDI3Miwic3ViIjoiL2RhdGFzZXRzL3BlLW5scC9vdi1raXQtY29kZS1maWxlcy12Mi1maWx0ZXJlZCIsImV4cCI6MTc1NzEzNzg3MiwiaXNzIjoiaHR0cHM6Ly9odWdnaW5nZmFjZS5jbyJ9.kb-6wWCurVhgUnu-9jiHm5-_k16I2N3inQNfhFSgs1UprKn8cTzhtlli2GkoLnL8kBpuLhkpE9ri5tYHugDXBw","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
# Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zipfile
import tempfile
import sys
import os
import stat
import time
from typing import Any, Callable
RENAME_RETRY_COUNT = 100
RENAME_RETRY_DELAY = 0.1
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
def remove_directory_item(path):
if os.path.islink(path) or os.path.isfile(path):
try:
os.remove(path)
except PermissionError:
# make sure we have access and try again:
os.chmod(path, stat.S_IRWXU)
os.remove(path)
else:
# try first to delete the dir because this will work for folder junctions, otherwise we would follow the junctions and cause destruction!
clean_out_folder = False
try:
# make sure we have access preemptively - this is necessary because recursing into a directory without permissions
# will only lead to heart ache
os.chmod(path, stat.S_IRWXU)
os.rmdir(path)
except OSError:
clean_out_folder = True
if clean_out_folder:
# we should make sure the directory is empty
names = os.listdir(path)
for name in names:
fullname = os.path.join(path, name)
remove_directory_item(fullname)
# now try to again get rid of the folder - and not catch if it raises:
os.rmdir(path)
class StagingDirectory:
def __init__(self, staging_path):
self.staging_path = staging_path
self.temp_folder_path = None
os.makedirs(staging_path, exist_ok=True)
def __enter__(self):
self.temp_folder_path = tempfile.mkdtemp(prefix="ver-", dir=self.staging_path)
return self
def get_temp_folder_path(self):
return self.temp_folder_path
# this function renames the temp staging folder to folder_name, it is required that the parent path exists!
def promote_and_rename(self, folder_name):
abs_dst_folder_name = os.path.join(self.staging_path, folder_name)
os.rename(self.temp_folder_path, abs_dst_folder_name)
def __exit__(self, type, value, traceback):
# Remove temp staging folder if it's still there (something went wrong):
path = self.temp_folder_path
if os.path.isdir(path):
remove_directory_item(path)
def rename_folder(staging_dir: StagingDirectory, folder_name: str):
try:
staging_dir.promote_and_rename(folder_name)
except OSError as exc:
# if we failed to rename because the folder now exists we can assume that another packman process
# has managed to update the package before us - in all other cases we re-raise the exception
abs_dst_folder_name = os.path.join(staging_dir.staging_path, folder_name)
if os.path.exists(abs_dst_folder_name):
logger.warning(
f"Directory {abs_dst_folder_name} already present, package installation already completed"
)
else:
raise
def call_with_retry(
op_name: str, func: Callable, retry_count: int = 3, retry_delay: float = 20
) -> Any:
retries_left = retry_count
while True:
try:
return func()
except (OSError, IOError) as exc:
logger.warning(f"Failure while executing {op_name} [{str(exc)}]")
if retries_left:
retry_str = "retry" if retries_left == 1 else "retries"
logger.warning(
f"Retrying after {retry_delay} seconds"
f" ({retries_left} {retry_str} left) ..."
)
time.sleep(retry_delay)
else:
logger.error("Maximum retries exceeded, giving up")
raise
retries_left -= 1
def rename_folder_with_retry(staging_dir: StagingDirectory, folder_name):
dst_path = os.path.join(staging_dir.staging_path, folder_name)
call_with_retry(
f"rename {staging_dir.get_temp_folder_path()} -> {dst_path}",
lambda: rename_folder(staging_dir, folder_name),
RENAME_RETRY_COUNT,
RENAME_RETRY_DELAY,
)
def install_package(package_path, install_path):
staging_path, version = os.path.split(install_path)
with StagingDirectory(staging_path) as staging_dir:
output_folder = staging_dir.get_temp_folder_path()
with zipfile.ZipFile(package_path, allowZip64=True) as zip_file:
zip_file.extractall(output_folder)
# attempt the rename operation
rename_folder_with_retry(staging_dir, version)
print(f"Package successfully installed to {install_path}")
if __name__ == "__main__":
executable_paths = os.getenv("PATH")
paths_list = executable_paths.split(os.path.pathsep) if executable_paths else []
target_path_np = os.path.normpath(sys.argv[2])
target_path_np_nc = os.path.normcase(target_path_np)
for exec_path in paths_list:
if os.path.normcase(os.path.normpath(exec_path)) == target_path_np_nc:
raise RuntimeError(f"packman will not install to executable path '{exec_path}'")
install_package(sys.argv[1], target_path_np)
import os
import argparse
import sys
import json
import packmanapi
import urllib3
def find_omniverse_apps():
http = urllib3.PoolManager()
try:
r = http.request("GET", "http://localhost:33480/components")
except Exception as e:
print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
sys.exit(1)
apps = {}
for x in json.loads(r.data.decode("utf-8")):
latest = x.get("installedVersions", {}).get("latest", "")
if latest:
for s in x.get("settings", []):
if s.get("version", "") == latest:
root = s.get("launch", {}).get("root", "")
apps[x["slug"]] = (x["name"], root)
break
return apps
def create_link(src, dst):
print(f"Creating a link '{src}' -> '{dst}'")
packmanapi.link(src, dst)
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
parser.add_argument(
"--path",
help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
required=False,
)
parser.add_argument(
"--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
)
args = parser.parse_args()
path = args.path
if not path:
print("Path is not specified, looking for Omniverse Apps...")
apps = find_omniverse_apps()
if len(apps) == 0:
print(
"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
)
sys.exit(0)
print("\nFound following Omniverse Apps:")
for i, slug in enumerate(apps):
name, root = apps[slug]
print(f"{i}: {name} ({slug}) at: '{root}'")
if args.app:
selected_app = args.app.lower()
if selected_app not in apps:
choices = ", ".join(apps.keys())
print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
sys.exit(0)
else:
selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
if not selected_app:
selected_app = next(iter(apps))
print(f"\nSelected app: {selected_app}")
_, path = apps[selected_app]
if not os.path.exists(path):
print(f"Provided path doesn't exist: {path}")
else:
SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
create_link(f"{SCRIPT_ROOT}/../app", path)
print("Success!")
### v0.19.0 - 2024-04-01
- Updates for Cesium for Omniverse v0.19.0.
### v0.18.0 - 2024-03-01
- Added samples for Raster Overlays.
- Updates for Cesium for Omniverse v0.18.0.
### v0.17.0 - 2024-02-01
- Added project files for Tileset Clipping tutorial.
- Updates for Cesium for Omniverse v0.17.0.
### v0.16.0 - 2024-01-02
- Added project files for Placing Objects on the Globe tutorial.
- Added project files for Style by Properties tutorial.
- Updates for Cesium for Omniverse v0.16.0.
### v0.15.0 - 2023-12-14
- Updates for Cesium for Omniverse v0.15.0.
### v0.14.0 - 2023-12-01
- Updates for Cesium for Omniverse v0.14.0.
### v0.13.0 - 2023-11-01
- Fixed Google Photorealistic 3D Tiles tutorial sample.
- Updates for Cesium for Omniverse v0.13.0.
### v0.12.0 - 2023-10-25
- Changed Google Photorealistic 3D Tiles samples to go through Cesium ion.
- Added samples for Globe Anchors.
- Added samples for Tileset Clipping.
- Added samples for Tileset Materials.
- Updates for Cesium for Omniverse v0.12.0.
### v0.11.0 - 2023-10-02
- Updates for Cesium for Omniverse v0.11.0.
### v0.10.0 - 2023-09-01
- Updates for Cesium for Omniverse v0.10.0.
### v0.9.0 - 2023-08-01
- Added project files for dynamic skies and sun study tutorial.
- Updates for Cesium for Omniverse v0.9.0.
### v0.8.0 - 2023-07-03
- Updates for Cesium for Omniverse v0.8.0.
### v0.7.0 - 2023-06-01
- Switched to RTX Real-Time renderer for Google 3D Tiles examples.
- Updates for Cesium for Omniverse v0.7.0.
### v0.6.0 - 2023-05-10
- Added samples to showcase Photorealistic 3D Tiles via Google Maps Platform.
- Updates for Cesium for Omniverse v0.6.0.
### v0.5.0 - 2023-05-01
- Updates for Cesium for Omniverse v0.5.0.
### v0.4.0 - 2023-04-03
- Updates for Cesium for Omniverse v0.4.0.
### v0.3.0 - 2023-03-20
- Initial release.
CesiumGS/cesium-omniverse-samples/README.md
[](https://cesium.com/)
# Cesium for Omniverse Samples
The Cesium for Omniverse Samples contains a series of USD files to help learn and explore the [Cesium for Omniverse](https://cesium.com/platform/cesium-for-omniverse) extension.
The USDs in this project will walk you through the extension's features and demonstrate global-scale content and experiences in Nvidia Omniverse USD Composer.
The source code for Cesium for Omniverse itself may be found in the [cesium-omniverse](https://github.com/CesiumGS/cesium-omniverse) repo.

*<p align="center">Photogrammetry of San Francisco, California visualized in Omniverse USD Composer, using Cesium for Omniverse.<br>Open <b>examples/SanFrancisco/SanFrancisco.usd</b> in Omniverse USD Composer to experience it yourself!</p>*
### :rocket: Get Started
1. **[Download Cesium for Omniverse Samples](https://github.com/CesiumGS/cesium-omniverse-samples/releases/latest)**.
2. Extract the `.zip` file into a suitable location on your computer.
3. Follow the Quickstart tutorial to setup Cesium for Omniverse with Omniverse USD Composer.
4. Open any of the USD files within this repo to explore them.
Have questions? Ask them on the [community forum](https://community.cesium.com).
## :mountain: USD Descriptions
The content in this repo is split into two main folders - Examples and Tutorials.
### :one: Examples Folder
The examples folder contains cities built with various datasets, high-quality lighting, and rendering settings optimised for real-time interaction whilst also providing high-quality image and video outputs.
#### Denver
In Denver you'll see [Cesium World Terrain](https://cesium.com/platform/cesium-ion/content/cesium-world-terrain/) combined with photogrammetry of the city center, captured by [Aerometrex](https://aerometrex.com.au/).
#### San Francisco
In San Francisco you'll see [Cesium World Terrain](https://cesium.com/platform/cesium-ion/content/cesium-world-terrain/) combined with photogrammetry of the city, captured by [Aerometrex](https://aerometrex.com.au/).
#### Vancouver
In Vancouver you'll see [Cesium World Terrain](https://cesium.com/platform/cesium-ion/content/cesium-world-terrain/) combined with [Cesium OSM Buildings](https://cesium.com/platform/cesium-ion/content/cesium-osm-buildings/).
### :two: Tutorials Folder
The tutorials folder contains USDs representing the completed steps of each tutorial found [here](https://cesium.com/learn/omniverse/).
If you want to see the intended outcome of each tutorial, simply open the corresponding USD.
### :green_book: License
[Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0.html). Cesium for Omniverse Samples is free to use as a starter project for both commercial and non-commercial use.
boredengineering/Robots_for_Omniverse/README.md
# Robots_for_Omniverse
The objective of this project is to make developing robotics an engaging and exciting experience.<br/>
> Original Author:<br/>
> Renan Monteiro Barbosa<br/>
Please feel free to contribute either robots in openUSD or URDF descriptions that can be converted.<br/>
## openUSD_assets
List of robots converted to openUSD.<br/>
### Quadrupeds
- [Boston Dynamics](https://www.bostondynamics.com/)
- [Spot](https://github.com/chvmp/spot_ros)
- [SpotMicroAI](https://spotmicroai.readthedocs.io/en/latest/)
### Bipedal
- [Agility Robotics](https://agilityrobotics.com/)
- [Digit](https://github.com/adubredu/DigitRobot.jl)
- [Unitree Robotics](https://www.unitree.com/h1/)
- [NJIT - TOCABI](https://github.com/cadop/tocabi)
## URDF_descriptions
It contains all the robot descriptions in URDF.<br/>
Below is the list of all the sources from which the URDFs were obtained.<br/>
### Quadrupeds
- [kodlab_gazebo - Ghost Robotics](https://github.com/KodlabPenn/kodlab_gazebo)
- [ANYbotics](https://github.com/ANYbotics)
- [ANYbotics' ANYmal B](https://github.com/ANYbotics/anymal_b_simple_description)
- [ANYbotics' ANYmal B - Modified for CHAMP](https://github.com/chvmp/anymal_b_simple_description)
- [ANYbotics' ANYmal C](https://github.com/ANYbotics/anymal_c_simple_description)
- [ANYbotics' ANYmal B - Modified for CHAMP](https://github.com/chvmp/anymal_c_simple_description)
- **Boston Dynamic's Little Dog**
- [Boston Dynamic's Little Dog - by RobotLocomotion](https://github.com/RobotLocomotion/LittleDog)
- [Boston Dynamic's Little Dog - Modified for CHAMP](https://github.com/chvmp/littledog_description)
- **Boston Dynamic's Spot**
- [Boston Dynamic's Spot - by heuristicus](https://github.com/heuristicus/spot_ros)
- [Boston Dynamic's Spot - Modified for CHAMP](https://github.com/chvmp/spot_ros)
- [Dream Walker](https://github.com/Ohaginia/dream_walker)
- [MIT Mini Cheetah - Original](https://github.com/HitSZwang/mini-cheetah-gazebo-urdf)
- [MIT Mini Cheetah - Modified for CHAMP](https://github.com/chvmp/mini-cheetah-gazebo-urdf)
- [OpenDog V2 - Original](https://github.com/XRobots/openDogV2)
- [OpenDog V2 - Modified for CHAMP](https://github.com/chvmp/opendog_description)
- **Open Quadruped**
- [Open Quadruped](https://github.com/moribots/spot_mini_mini)
- [SpotMicroAI - Gitlab](https://gitlab.com/custom_robots/spotmicroai)
- [Spot Micro](https://github.com/chvmp/spotmicro_description)
- [Unitree Robotics All](https://github.com/unitreerobotics/unitree_ros)
- [Unitree Robotics' Youtube](https://www.youtube.com/@unitreerobotics7482)
- [Unitree Robotics All - Modified for CHAMP](https://github.com/chvmp/unitree_ros)
- [Unitree Robotics' A1](https://github.com/unitreerobotics/unitree_ros/tree/master/robots/a1_description)
- [Unitree Robotics' AliengoZ1](https://github.com/unitreerobotics/unitree_ros/tree/master/robots/aliengoZ1_description)
- [Unitree Robotics'Aliengo](https://github.com/unitreerobotics/unitree_ros/tree/master/robots/aliengo_description)
- [Unitree Robotics' B1](https://github.com/unitreerobotics/unitree_ros/tree/master/robots/b1_description)
- [Unitree Robotics' Go1](https://github.com/unitreerobotics/unitree_ros/tree/master/robots/go1_description)
- [Unitree Robotics' Laikago](https://github.com/unitreerobotics/unitree_ros/tree/master/robots/laikago_description)
- [Unitree Robotics' Z1](https://github.com/unitreerobotics/unitree_ros/tree/master/robots/z1_description)
- [Stochlab's Stochlite](https://stochlab.github.io/)
- [Stochlab's Stochlite - Modified by aditya-shirwatkar](https://github.com/aditya-shirwatkar/stochlite_description)
- **Mini Pupper**
- [MangDang's Mini Pupper](https://github.com/mangdangroboticsclub/QuadrupedRobot)
- [simplified robot description of the MangDang's Mini Pupper](https://github.com/nisshan-x/mini_pupper_description)
- [Stanford pupper - Original](https://stanfordstudentrobotics.org/pupper)
- [Stanford pupper - Modified by Chandykunju Alex](https://github.com/chandyalex/stanford_pupper_description.git)
### Bipedal
- [Agility Robotics' Cassie - UMich-BipedLab](https://github.com/UMich-BipedLab/cassie_description)
- [Agility Robotics' Digit - DigitRobot.jl](https://github.com/adubredu/DigitRobot.jl)
- [NJIT - TOCABI](https://github.com/cadop/tocabi)
- [Unitree H1](https://github.com/google-deepmind/mujoco_menagerie/tree/main/unitree_h1)
### Manipulation
- [GoogleAI ROBEL D'Kitty](https://github.com/google-research/robel-scenes)
- [GoogleAI ROBEL D'Kitty - Modified for CHAMP](https://github.com/chvmp/dkitty_description)
- [The Shadow Robot Company](https://github.com/shadow-robot)
- [Shadow Hand - archived](https://github.com/AndrejOrsula/shadow_hand_ign)
# Appendix
## Notes<br/>
- NJIT-TOCABI has a high-poly and a low-poly version; this repo has the low-poly version [light_weight](https://github.com/cadop/tocabi/tree/main/light_weight).<br/>
- Dream Walker USD files are too large; instanceable_meshes.usd could not be committed.<br/>
RobotEra TECHNOLOGY CO.,LTD.
Founded in 2023, RobotEra TECHNOLOGY CO., LTD focuses on the R&D of embodied AI general-purpose humanoid robots.
https://github.com/roboterax
# openDogV2
> Original Author: <br/>
> James Bruton <br/>
> Xrobots<br/>
> Modified by:
> Renan Monteiro Barbosa<br/>
Purpose: Adapt this open-source quadruped robot project to the standards of the Isaac Sim simulator.<br/>
>Sources:<br/>
>- [OpenDog V2 - Original](https://github.com/XRobots/openDogV2)<br/>
>- [OpenDog V2 - Modified for CHAMP](https://github.com/chvmp/opendog_description)<br/>
> CAD and Code that relates to this YouTube series:<br/>
> https://www.youtube.com/playlist?list=PLpwJoq86vov9CcmrLGyM2XyyYDAYG0-Iu
- **Release 1:** created at the end of part 6 of the YouTube series. Please note the issues stated at the end of this video.<br/>
- **Release 2:** created at the end of part 7 of the YouTube series. Please note the issues stated during this video. Note that the remote is unchanged since release 1.<br/>
- **Release 3:** created for part 8 of the YouTube series. Includes the modified knee motor pulley, plus the Python and Arduino code for the deep learning model.<br/>
## Related Community Projects:
OpenDog URDF/config for CHAMP: https://github.com/chvmp/opendog_description
'openDog 2.1' with higher belt reductions and cooling fans: https://github.com/J-DIndustries/openDog-V2.1
# Modified for the OpenUSD format
Import into Isaac Sim
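A minimal sketch of the import step is shown below. It assumes Isaac Sim's URDF importer extension is enabled; the module name (`omni.importer.urdf`), the config options and the file path are assumptions that vary between Isaac Sim releases.

```python
# Hypothetical sketch: importing the openDog V2 URDF through Isaac Sim's URDF importer.
# The module name (omni.importer.urdf) and the urdf_path are assumptions and depend on
# the Isaac Sim version in use.
import omni.kit.commands
from omni.importer.urdf import _urdf

import_config = _urdf.ImportConfig()
import_config.fix_base = False          # keep the quadruped base free-floating
import_config.make_instanceable = True  # share meshes through an instanceable USD

omni.kit.commands.execute(
    "URDFParseAndImportFile",
    urdf_path="openDogV2.urdf",
    import_config=import_config,
)
```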
# Unitree H1 Description (MJCF)
Requires MuJoCo 2.2.2 or later.
## Overview
This package contains a simplified robot description (MJCF) of the [H1 Humanoid
Robot](https://www.unitree.com/h1/) developed by [Unitree
Robotics](https://www.unitree.com/). The original URDF and assets were provided
directly by [Unitree Robotics](https://www.unitree.com/) under a [BSD-3-Clause
License](LICENSE).
<p float="left">
<img src="h1.png" width="400">
</p>
## URDF → MJCF derivation steps
1. Added `<mujoco> <compiler discardvisual="false" strippath="false" fusestatic="false"/> </mujoco>` to the URDF's
`<robot>` clause in order to preserve visual geometries.
2. Loaded the URDF into MuJoCo and saved a corresponding MJCF.
3. Manually edited the MJCF to extract common properties into the `<default>` section.
4. Added actuators.
5. Added `scene.xml` which includes the robot, with a textured groundplane, skybox, and haze.
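For reference, steps 1 and 2 can be reproduced with the MuJoCo Python bindings. The sketch below is an illustration only, not part of this package: the file names are placeholders, and it presumes a local `h1.urdf` that already carries the `<mujoco>` compiler clause from step 1.

```python
# Minimal sketch of step 2, assuming MuJoCo >= 2.2.2 Python bindings and a local
# h1.urdf that already contains the <mujoco><compiler .../></mujoco> clause from step 1.
import mujoco

# Load the URDF; the embedded compiler options preserve visual geometries.
model = mujoco.MjModel.from_xml_path("h1.urdf")

# Save the MJCF that MuJoCo derived from the URDF. Defaults and actuators are then
# edited by hand as described in steps 3 and 4.
mujoco.mj_saveLastXML("h1_derived.xml", model)
```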
## License
This model is released under a [BSD-3-Clause License](LICENSE).
# MIT Mini Cheetah
A URDF description file of a quadruped robot modeled on the MIT Mini Cheetah.
>Source: <br/>
>- YOBOTICS, INC.<br/>
>- [MIT Mini Cheetah - Original](https://github.com/HitSZwang/mini-cheetah-gazebo-urdf)<br/>
>- [MIT Mini Cheetah - Modified for CHAMP](https://github.com/chvmp/mini-cheetah-gazebo-urdf)<br/>
# Boston Dynamics Robots
https://www.bostondynamics.com/
## Little Dog
> Source:<br/>
> - [Boston Dynamic's Little Dog - by RobotLocomotion](https://github.com/RobotLocomotion/LittleDog)
> - [Boston Dynamic's Little Dog - Modified for CHAMP](https://github.com/chvmp/littledog_description)
## Spot
> Source:<br/>
> - [Boston Dynamic's Spot - by heuristicus](https://github.com/heuristicus/spot_ros)
> - [Boston Dynamic's Spot - Modified for CHAMP](https://github.com/chvmp/spot_ros)
XiaomingY/omni-ies-viewer/README.md
# IES Viewer Omniverse Extension

This extension displays the IES profile web for selected light objects. It is particularly useful for visualizing architectural lighting designs. The orientation of measured light distribution profiles can be quickly tested with visual feedback. IES files are resampled so they stay lightweight and consistent to render. [A video demo](https://drive.google.com/file/d/1DxvjVGT6ZlfukfuTvyBu3iXaHz8qvY5Q/view?usp=sharing)
This extension is developed based on the [omni.example.ui_scene.object_info](https://github.com/NVIDIA-Omniverse/kit-extension-sample-ui-scene/tree/main/exts/omni.example.ui_scene.object_info)
Supported light types: sphere light, rectangular light, disk light and cylinder light.
Only Type C IES files are currently supported, which is also the most common type for architectural lighting.
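The extension reads the profile straight from the USD light through its `shaping:ies:file` attribute (the same attribute queried in `object_info_model.py` further down in this document). Below is a minimal sketch, not part of the extension, that prints the profile assigned to a light at a hypothetical path `/World/SphereLight`:

```python
# Minimal sketch (not part of the extension): read the IES profile assigned to a light.
# The prim path /World/SphereLight is an assumption used for illustration only.
import omni.usd

stage = omni.usd.get_context().get_stage()
light_prim = stage.GetPrimAtPath("/World/SphereLight")

# Lights store their photometric profile in the shaping:ies:file attribute; the
# extension resamples the referenced file into points for the viewport overlay.
ies_attr = light_prim.GetAttribute("shaping:ies:file")
ies_file = ies_attr.Get() if ies_attr else None
print(ies_file if ies_file else "No IES profile assigned to this light.")
```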
## Adding This Extension
To add this extension to your Omniverse app:
1. Go to Extension Manager and turn on Viewport Utility extension
2. Add `git://github.com/XiaomingY/omni-ies-viewer.git?branch=main&dir=exts` to the extension search path
3. Turn on the IES Viewer Extension
import os
import argparse
import sys
import json
import packmanapi
import urllib3
def find_omniverse_apps():
http = urllib3.PoolManager()
try:
r = http.request("GET", "http://127.0.0.1:33480/components")
except Exception as e:
print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
sys.exit(1)
apps = {}
for x in json.loads(r.data.decode("utf-8")):
latest = x.get("installedVersions", {}).get("latest", "")
if latest:
for s in x.get("settings", []):
if s.get("version", "") == latest:
root = s.get("launch", {}).get("root", "")
apps[x["slug"]] = (x["name"], root)
break
return apps
def create_link(src, dst):
print(f"Creating a link '{src}' -> '{dst}'")
packmanapi.link(src, dst)
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
parser.add_argument(
"--path",
help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
required=False,
)
parser.add_argument(
"--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
)
args = parser.parse_args()
path = args.path
if not path:
print("Path is not specified, looking for Omniverse Apps...")
apps = find_omniverse_apps()
if len(apps) == 0:
print(
"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
)
sys.exit(0)
print("\nFound following Omniverse Apps:")
for i, slug in enumerate(apps):
name, root = apps[slug]
print(f"{i}: {name} ({slug}) at: '{root}'")
if args.app:
selected_app = args.app.lower()
if selected_app not in apps:
choices = ", ".join(apps.keys())
print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
sys.exit(0)
else:
selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
if not selected_app:
selected_app = next(iter(apps))
print(f"\nSelected app: {selected_app}")
_, path = apps[selected_app]
if not os.path.exists(path):
print(f"Provided path doesn't exist: {path}")
else:
SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
create_link(f"{SCRIPT_ROOT}/../../app", path)
print("Success!")
# Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zipfile
import tempfile
import sys
import shutil
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
class TemporaryDirectory:
def __init__(self):
self.path = None
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, type, value, traceback):
# Remove temporary data created
shutil.rmtree(self.path)
def install_package(package_src_path, package_dst_path):
with zipfile.ZipFile(
package_src_path, allowZip64=True
) as zip_file, TemporaryDirectory() as temp_dir:
zip_file.extractall(temp_dir)
# Recursively copy (temp_dir will be automatically cleaned up on exit)
try:
# Recursive copy is needed because both package name and version folder could be missing in
# target directory:
shutil.copytree(temp_dir, package_dst_path)
except OSError as exc:
logger.warning(
"Directory %s already present, packaged installation aborted" % package_dst_path
)
else:
logger.info("Package successfully installed to %s" % package_dst_path)
install_package(sys.argv[1], sys.argv[2])
import omni.ext
import omni.ui as ui
from omni.kit.viewport.utility import get_active_viewport_window
from .viewport_scene import ViewportSceneInfo
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class AimingToolExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def __init__(self) -> None:
super().__init__()
self.viewport_scene = None
def on_startup(self, ext_id):
viewport_window = get_active_viewport_window()
self.viewport_scene = ViewportSceneInfo(viewport_window, ext_id)
def on_shutdown(self):
if self.viewport_scene:
self.viewport_scene.destroy()
self.viewport_scene = None
from omni.ui import scene as sc
import omni.ui as ui
from .object_info_manipulator import ObjInfoManipulator
from .object_info_model import ObjInfoModel
class ViewportSceneInfo():
"""The Object Info Manipulator, placed into a Viewport"""
def __init__(self, viewport_window, ext_id) -> None:
self.scene_view = None
self.viewport_window = viewport_window
# NEW: Create a unique frame for our SceneView
with self.viewport_window.get_frame(ext_id):
# Create a default SceneView (it has a default camera-model)
self.scene_view = sc.SceneView()
# Add the manipulator into the SceneView's scene
with self.scene_view.scene:
ObjInfoManipulator(model=ObjInfoModel())
# Register the SceneView with the Viewport to get projection and view updates
self.viewport_window.viewport_api.add_scene_view(self.scene_view)
def __del__(self):
self.destroy()
def destroy(self):
if self.scene_view:
# Empty the SceneView of any elements it may have
self.scene_view.scene.clear()
# un-register the SceneView from Viewport updates
if self.viewport_window:
self.viewport_window.viewport_api.remove_scene_view(self.scene_view)
# Remove our references to these objects
self.viewport_window = None
self.scene_view = None
from pxr import Tf
from pxr import Gf
from pxr import Usd
from pxr import UsdGeom
from pxr import UsdShade
from pxr import UsdLux
from .IESReader import IESLight
import os.path
import numpy as np
from omni.ui import scene as sc
import omni.usd
def _flatten_matrix(matrix: Gf.Matrix4d):
m0, m1, m2, m3 = matrix[0], matrix[1], matrix[2], matrix[3]
return [
m0[0],
m0[1],
m0[2],
m0[3],
m1[0],
m1[1],
m1[2],
m1[3],
m2[0],
m2[1],
m2[2],
m2[3],
m3[0],
m3[1],
m3[2],
m3[3],
]
class ObjInfoModel(sc.AbstractManipulatorModel):
"""
The model tracks the position and info of the selected object.
"""
class MatrixItem(sc.AbstractManipulatorItem):
"""
        The Model Item represents the transformation. It doesn't contain anything
        because we take the transformation directly from USD when requesting.
"""
identity = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
def __init__(self):
super().__init__()
self.value = self.identity.copy()
class PositionItem(sc.AbstractManipulatorItem):
"""
The Model Item represents the position. It doesn't contain anything
because we take the position directly from USD when requesting.
"""
def __init__(self) -> None:
super().__init__()
self.value = [0, 0, 0]
class PositionList(sc.AbstractManipulatorItem):
"""
        The Model Item represents a list of positions. It doesn't contain anything
        because we take the positions directly from USD when requesting.
"""
def __init__(self) -> None:
super().__init__()
self.value = [[0,0,0]]
def __init__(self) -> None:
super().__init__()
# Current selected prim list
self.prim = []
self.current_path = []
self.material_name = []
self.stage_listener = None
self.horizontal_step = 15
self.vertical_step = 15
self.IESPoints = [ObjInfoModel.PositionList()]
self.transformation = [ObjInfoModel.MatrixItem()]
# Save the UsdContext name (we currently only work with a single Context)
self.usd_context = self._get_context()
# Track selection changes
self.events = self.usd_context.get_stage_event_stream()
self.stage_event_delegate = self.events.create_subscription_to_pop(
self.on_stage_event, name="Object Info Selection Update"
)
@property
def _time(self):
return Usd.TimeCode.Default()
def _get_context(self) -> Usd.Stage:
# Get the UsdContext we are attached to
return omni.usd.get_context()
    #Update when lights are transformed or modified
def notice_changed(self, notice: Usd.Notice, stage: Usd.Stage) -> None:
"""Called by Tf.Notice. Used when the current selected object changes in some way."""
light_path = self.current_path
if not light_path:
return
for p in notice.GetChangedInfoOnlyPaths():
prim_path = p.GetPrimPath().pathString
#check if prim_path not in selected list but parent of prim_path is in selected list
if prim_path not in light_path:
if (True in (light_path_item.startswith(prim_path) for light_path_item in light_path)):
if UsdGeom.Xformable.IsTransformationAffectedByAttrNamed(p.name):
self._item_changed(self.transformation[0])
continue
if UsdGeom.Xformable.IsTransformationAffectedByAttrNamed(p.name):
self._item_changed(self.transformation[0])
#if light property changed such as ies file changed, update profile
self._item_changed(self.transformation[0])
def _get_transform(self, time: Usd.TimeCode):
"""Returns world transform of currently selected object"""
if not self.prim:
return [ObjInfoModel.MatrixItem.identity.copy()]
# Compute matrix from world-transform in USD
#get transform matrix for each selected light
        world_xform_list = [UsdGeom.Xformable(prim).ComputeLocalToWorldTransform(time) for prim in self.prim]
# Flatten Gf.Matrix4d to list
return [_flatten_matrix(world_xform) for world_xform in world_xform_list]
def get_item(self, identifier):
if identifier == "IESPoints":
return self.IESPoints
if identifier == "transformation":
return self.transformation
def get_as_floats(self, item):
if item == self.transformation:
return self._get_transform(self._time)
if item == self.IESPoints:
return self.get_points(self._time)
return []
#get ies points for each selected light
def get_points(self, time: Usd.TimeCode):
if not self.prim:
return [[0,0,0]]
allIESPoint = []
for prim in self.prim:
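            # The attribute returns an Sdf.AssetPath; its string form wraps the path in '@',
            # so strip those markers before handing the file path to the IES parser.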
iesFile = prim.GetAttribute('shaping:ies:file').Get()
allIESPoint.append(IESLight(str(iesFile).replace('@', '')).points)
return allIESPoint
def on_stage_event(self, event):
"""Called by stage_event_stream. We only care about selection changes."""
if event.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
self.current_path = []
self.prim = []
primList = []
primPathList = []
usd_context = self._get_context()
stage = usd_context.get_stage()
if not stage:
return
prim_paths = usd_context.get_selection().get_selected_prim_paths()
if not prim_paths:
# This turns off the manipulator when everything is deselected
self._item_changed(self.transformation[0])
return
#select light with ies file applied.
lightCount = 0
for i in prim_paths:
prim = stage.GetPrimAtPath(i)
if(UsdLux.Light(prim) and prim.GetAttribute('shaping:ies:file').Get() and not (prim.IsA(UsdLux.DistantLight))):
primList.append(prim)
primPathList.append(i)
lightCount = lightCount +1
if(lightCount==0):
if self.stage_listener:
self.stage_listener.Revoke()
self.stage_listener = None
self._item_changed(self.transformation[0])
return
if not self.stage_listener:
                # Listen for changes to USD objects (transforms or light attribute edits)
self.stage_listener = Tf.Notice.Register(Usd.Notice.ObjectsChanged, self.notice_changed, stage)
self.prim = primList
self.current_path = primPathList
# Position is changed because new selected object has a different position
self._item_changed(self.transformation[0])
def destroy(self):
self.events = None
self.stage_event_delegate.unsubscribe()
from __future__ import division
from omni.ui import scene as sc
from omni.ui import color as cl
import omni.ui as ui
import numpy as np
class ObjInfoManipulator(sc.Manipulator):
"""Manipulator that displays the object path and material assignment
with a leader line to the top of the object's bounding box.
"""
def on_build(self):
"""Called when the model is changed and rebuilds the whole manipulator"""
if not self.model:
return
IESPoints = self.model.get_as_floats(self.model.IESPoints)
numHorizontal = int((360/self.model.horizontal_step)+1)
primCount = 0
for transformation in self.model.get_as_floats(self.model.transformation):
self.__root_xf = sc.Transform(transformation)
with self.__root_xf:
self._x_xform = sc.Transform()
with self._x_xform:
self._shape_xform = sc.Transform()
IESPoint = IESPoints[primCount]
numVertical = int(len(IESPoint)/numHorizontal)
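                    # Points are ordered one horizontal slice after another, each slice holding
                    # numVertical samples, so draw one curve of the web per horizontal angle.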
for index in range(0,numHorizontal):
points = IESPoint[index*numVertical:(index+1)*numVertical]
if(len(points)>0):
sc.Curve(points.tolist(), thicknesses=[1.0], colors=[cl.yellow],tessellation=9)
primCount = primCount+1
def on_model_updated(self, item):
# Regenerate the manipulator
self.invalidate()
import numpy as np
import re
import math
#import matplotlib.pyplot as plt
from scipy import interpolate
import os.path
#from mpl_toolkits.mplot3d.axes3d import Axes3D
import omni.ext
import omni.ui as ui
import omni.kit.pipapi
omni.kit.pipapi.install("astropy")
from astropy.coordinates import spherical_to_cartesian
DEFAULT_HORIZONTAL_STEP = 15
DEFAULT_VERTICAL_STEP = 15
IES_MaxLength = 80
class IESLight():
def __init__(self,iesFile):
# Current selected prim
if iesFile and os.path.exists(iesFile):
self.file = iesFile
else:
return
self.width = 0
self.length = 0
self.radius = 0
all_values = self.readIESfile(self.file)
verticalAngles,horizontalAngles,intensities,self.width,self.length,self.radius = self.getIESproperties(all_values)
horizontalAnglesMirrored, intensityMirrored = self.mirrorAngles(horizontalAngles,intensities)
horizontalResampled = np.arange(0, 361, DEFAULT_HORIZONTAL_STEP)
verticalResampled = np.arange(0, verticalAngles[-1]+1, DEFAULT_VERTICAL_STEP)
resampledIntensity = self.interpolateIESValues(np.array(horizontalAnglesMirrored),np.array(verticalAngles),horizontalResampled,verticalResampled,intensityMirrored)
self.points = self.IESCoord2XYZ(horizontalResampled,verticalResampled,resampledIntensity,IES_MaxLength)
#read ies files and return vertical angles, horizontal angles, intensities, width, length, radius.
    #based on the symmetry, horizontal angles are mirrored and resampled
def readIESfile(self, fileName):
        f=open(fileName, encoding = "ISO-8859-1")#need rb to read \r\n correctly. Otherwise universal newline function ignores carriage return.
startReading = 0
line = f.readline()
allValues = ""
while line:
if( not(line.strip())):
break
else:
#after this line, there are actual useful values
if("TILT=NONE" in line.strip()):
line = f.readline()
startReading = 1
#read all number to one string
if(startReading):
allValues = allValues+line
line = f.readline()
f.close()
        #one array with all values
        dimensions = re.split(r'\s+', allValues.strip())
        return dimensions
def getIESproperties(self, allValues):
        #returns vertical angles, horizontal angles, intensities, width, length, radius
FEET2METER = 0.3048
verticalAngles = []
horizontalAngles = []
width = 0
length = 0
radius = 0
intensityMultiplier = 1
numberVerticalAngle = 0
numberHorizontalAngle = 0
unit = 1 #1 for feet, 2 for meter
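        # After "TILT=NONE" the IES (LM-63) header values are, in order: number of lamps,
        # lumens per lamp, candela multiplier, number of vertical angles, number of horizontal
        # angles, photometric type, units type (1 = feet, 2 = meters), width, length and height,
        # then ballast factor, future use and input watts; the angle lists start at index 13.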
#number of vertical angles and horizontal angles measured
numberVerticalAngle = int(allValues[3])
numberHorizontalAngle = int(allValues[4])
        #check if shape is rectangle or disk (a negative value means a circular opening)
        if(float(allValues[7])<0):
            radius = float(allValues[7])*-1
        else:
            width = float(allValues[7])
            length = float(allValues[8])
        #convert dimensions to meter if measured in feet
        if(float(allValues[6])==1):
            radius = radius*FEET2METER
            width = width*FEET2METER
            length = length*FEET2METER
#the actual vertical angles and horizontal angles in list
verticalAngles = list(map(float, allValues[13:13+numberVerticalAngle]))
horizontalAngles = list(map(float,allValues[13+numberVerticalAngle:13+numberVerticalAngle+numberHorizontalAngle]))
#read intensities and convert it to 2d array
intensities = np.array(allValues[13+numberVerticalAngle+numberHorizontalAngle:len(allValues)])
intensities = intensities.reshape(numberHorizontalAngle,numberVerticalAngle).astype(np.float16)
return verticalAngles,horizontalAngles,intensities,width,length,radius
#ies could have several symmetry:
#(1)only measured in one horizontal angle (0) which need to be repeated to all horizontal angle from 0 to 360
#(2)only measured in horizontal angles (0~90) which need to be mirrored twice to horizontal angle from 0 to 360
#(3)only measured in horizontal angles (0~180) which need to be mirrored to horizontal angle from 0 to 360
#(4)only measured in horizontal angles (0~360) which could be used directly
def mirrorAngles(self, horizontalAngles,intensities):
#make use of symmetry in the file and produce horizontal angles from 0~360
if(horizontalAngles[-1]==0):
horizontalAnglesMirrored = list(np.arange(0,361,DEFAULT_HORIZONTAL_STEP))
else:
horizontalAnglesMirrored = list(np.arange(0,361,horizontalAngles[-1]/(len(horizontalAngles)-1)))
        #make use of symmetry in the file and copy intensities for horizontal angles from 0~360
if(horizontalAngles[-1]==90):
#mirror results [90:180]
a = np.concatenate((intensities, np.flip(intensities, 0)[1:]), axis=0)
intensityMirrored = np.concatenate((a, np.flip(a, 0)[1:]), axis=0)
elif(horizontalAngles[-1]==180):
intensityMirrored = np.concatenate((intensities, np.flip(intensities, 0)[1:]), axis=0)
elif(horizontalAngles[-1]==0):
intensityMirrored = np.array(([intensities[0],]*len(np.arange(0,361,DEFAULT_HORIZONTAL_STEP))))
else:
#print("Symmetry 360")
intensityMirrored = intensities
return horizontalAnglesMirrored, intensityMirrored
def IESCoord2XYZ(self, horizontalAngles,verticalAngles,intensity,maxLength):
maxValue = np.amax(intensity)
if(maxValue>maxLength):
intensity = intensity*(maxLength/maxValue)
for index, horizontalAngle in enumerate(horizontalAngles):
if(index ==0):
#Omniverse and 3ds Max makes the light upside down, horizontal angle rotation direction need to be flipped.
points = np.array(spherical_to_cartesian(intensity[index].tolist(), [math.radians(90-x) for x in verticalAngles], [math.radians(-1*horizontalAngle)]*len(verticalAngles))).transpose()
else:
newPoints = np.array(spherical_to_cartesian(intensity[index], [math.radians(90-x) for x in verticalAngles], [math.radians(-1*horizontalAngle)]*len(verticalAngles))).transpose()
points = np.concatenate((points, newPoints), axis=0)
#Omniverse and 3ds Max makes the light upside down, so flip z.
points[:,2] *= -1
return points
def interpolateIESValues(self, originalHorizontalAngles, originalVerticalAngles, newHorizontalAngles,newVerticalAngles, intensity):
fun = interpolate.interp2d(originalVerticalAngles, originalHorizontalAngles, intensity, kind='linear') # kind could be {'linear', 'cubic', 'quintic'}
interpolatedIntensity = fun(newVerticalAngles,newHorizontalAngles)
return interpolatedIntensity
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
authors = ["Xiaoming Yang"]
# The title and description fields are primarily for displaying extension info in UI
title = "IES Viewer For Display IES Light Profiles"
description="This extension displays IES profiles for selected light objects."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = "https://github.com/XiaomingY/omni-ies-viewer"
# One of categories for UI.
category = "Lighting"
# Keywords for the extension
keywords = ["Lighting", "IES"]
changelog = "docs/CHANGELOG.md"
preview_image = "data/preview.png"
icon = "data/icon.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.ui.scene" = { }
"omni.usd" = { }
"omni.kit.viewport.utility" = { }
# Main python module this extension provides, it will be publicly available as "import AimingTool".
[[python.module]]
name = "IESViewer"
# IES Viewer Omniverse Extension

This extension displays the IES profile web for selected light objects. It is particularly useful for visualizing architectural lighting designs. The orientation of measured light distribution profiles can be quickly tested with visual feedback. IES files are resampled to be lightweight to render.
This extension is developed based on the [omni.example.ui_scene.object_info](https://github.com/NVIDIA-Omniverse/kit-extension-sample-ui-scene/tree/main/exts/omni.example.ui_scene.object_info)
Supported light type: sphere light, rectangular light, disk light and cylinder light.
Only Type C IES files are supported currently, which is also the most commonly used type for architectural lighting.
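The extension draws the profile web for any selected non-distant light that has the `shaping:ies:file` attribute set. As a minimal sketch, a profile can be assigned to a light from Python like this (the prim path and IES file path are placeholders):

```python
import omni.usd
from pxr import Sdf, UsdLux

stage = omni.usd.get_context().get_stage()
# Create a sphere light and point it at an IES profile (placeholder paths)
light = UsdLux.SphereLight.Define(stage, "/World/IES_Light")
light.GetPrim().CreateAttribute("shaping:ies:file", Sdf.ValueTypeNames.Asset).Set("C:/profiles/downlight.ies")
```

Select the light in the viewport and the resampled web is drawn at its position.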
## Adding This Extension
To add this extension to your Omniverse app:
1. Go to Extension Manager and turn on Viewport Utility extension
2. Turn on the IES Viewer Extension
Ekozmaster/NvidiaOmniverseRTXRemixTools/README.md
# RTX Remix Tools [ekozerski.rtxremixtools]
Focusing on improving RTX Remix modding workflows, this extension is designed to speed up iteration when producing assets and mods by providing useful UI operations inside Omniverse apps like USD Composer/Create or Code.
It provides some options for the "Right click" context menu to setup ideal replacement assets, as well as some converting operations to ensure assets will be compatible with the Remix runtime.

It is primarily designed to operate on Remix captured scenes, so users can get instant feedback on what their mods will look like in the game scenes and iterate faster.
## Available Tools
### Fix Meshes Geometry
<i>(Operation is performed on every mesh of a USD/USDA source file and can\'t be undone)</i>
Interpolation Mode
- RTX Remix runtime only supports meshes with "vertex" interpolation mode, in which the "points", "normals" and "uvs" arrays
must have the same length, but DCC tools usually export the mesh using "faceVarying" interpolation mode.
This operation reorganizes the geometry to be compatible with the runtime (see the sketch at the end of this section).
- See: "Interpolation of Geometric Primitive Variables" - https://openusd.org/dev/api/class_usd_geom_primvar.html
- This operation only applies for meshes inside the mods folder, not the captured ones.
UV Maps
- The runtime supports a single UV map per mesh, which should have one of a few known names, so this script finds many variations, picks one and renames it to the standard "primvars:st", while also setting the appropriate type as "TextureCoordinate" (TexCoord2fArray / TexCoord2f[]). The other UV maps are discarded.
Unused Primvars
- displayColor and displayOpacity are now removed from the mesh.
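A minimal sketch of the idea behind these fixes, using a hypothetical triangulated mesh prim (the extension performs the real conversion, including normals and the remaining primvars, on whole USD/USDA files):

```python
from pxr import Usd, UsdGeom, Sdf

stage = Usd.Stage.Open("mod.usda")  # hypothetical file
mesh = UsdGeom.Mesh(stage.GetPrimAtPath("/RootNode/meshes/mesh_EXAMPLE/model"))  # hypothetical path

# "vertex" interpolation: expand points along faceVertexIndices so all arrays share one length
indices = mesh.GetFaceVertexIndicesAttr().Get()
points = mesh.GetPointsAttr().Get()
mesh.GetPointsAttr().Set([points[i] for i in indices])
mesh.GetFaceVertexIndicesAttr().Set(list(range(len(indices))))

# Rename the surviving UV set to the standard primvars:st with the TextureCoordinate role
api = UsdGeom.PrimvarsAPI(mesh)
uv = api.GetPrimvar("UVMap")  # hypothetical original UV name, assumed to already be per face vertex
st = api.CreatePrimvar("st", Sdf.ValueTypeNames.TexCoord2fArray, UsdGeom.Tokens.vertex)
st.Set(uv.Get())
api.RemovePrimvar("UVMap")
stage.Save()
```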
### Setup for Mesh Replacement
Exports the selected mesh to a selected path, already setting up the replacements and references to work in the runtime, so for every change the user only needs to:
- Open the exported mesh in their DCC of choice, make the changes and export again (with the right settings, triangulating faces, no materials, etc.)
- Back in OV, refresh the reference to see the changes in the captured scene.
- Use the "Fix Meshes Geometry" again to make it Remix-compatible.
- Enjoy.
The original mesh is kept in case the user only wants to add more models. Make sure to delete it if the intention is to completely replace the original mesh.
### Add Model
If the user already has authored USD models, this option allows selecting multiple models and adding them to the mesh_HASH prim.
### Add Material
This option allows selecting a material .MDL file (AperturePBR_Opacity.mdl or AperturePBR_Translucent.mdl) to add a material prim to the mesh_HASH prim.
### Original Draw Call Preservation
Allows to set the "custom int preserveOriginalDrawCall" attribute to indicate whether the runtime should be forced to render the original mesh or not. Must be set to 1 when placing custom lights or else the original mesh disappears. PS: Remember to set this to 0 if you want to make a mesh replacement and remove the original mesh.
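For reference, this is roughly what the menu item authors on the mesh_HASH prim (the hash in the prim path below is a placeholder):

```python
import omni.usd
import omni.kit.commands
from pxr import Sdf

stage = omni.usd.get_context().get_stage()
mesh_prim = stage.GetPrimAtPath("/RootNode/meshes/mesh_0123456789ABCDEF")  # placeholder hash
omni.kit.commands.execute(
    'CreateUsdAttributeCommand',
    prim=mesh_prim,
    attr_name='preserveOriginalDrawCall',
    attr_type=Sdf.ValueTypeNames.Int,
    attr_value=1,  # 1 = keep rendering the original mesh, 0 = allow it to be replaced/removed
)
```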
### Select Source Mesh
Quick way to select the original source mesh_HASH prim in the scene when you have an instance prim selected.
<br>
## Things to Keep in mind
- In a capture scene, any changes made to the "inst_SOMEHASH_x" prims won't show up in the runtime, so every change must be made in the "mesh_SOMEHASH" they're referencing. Whenever the user clicks an inst_ prim to perform an action like Fixing geometry or Add Model (Ex: Right clicking in the 3D viewport), this tool will try to find the referenced mesh_SOMEHASH and perform the operations on it instead.
- Having that in mind, always keep an eye on the "Layers" tab to check if you have done any changes to the "instances" path. Try to delete those changes as much as possible.
- The only material types that work in the runtime are described in the AperturePBR_Opacity.MDL and AperturePBR_Translucent.MDL, and every mesh must be triangulated. If you want to add a model you got from somewhere else like an asset store, make sure to convert the assets to work in the runtime.
- When placing lights in the scene, it is necessary to set an int "preserveOriginalDrawCall" to "1" in order to keep rendering the original mesh. If another layer is setting this flag somewhere and you want to replace/remove the original mesh in your own layer, you will notice that the original mesh can't be removed without setting this flag back to "0". You can do that on your own layer, set it back to "0", but make sure your layer comes on top of the other one that sets it to "1".
import argparse
import json
import os
import sys
import packmanapi
import urllib3
def find_omniverse_apps():
http = urllib3.PoolManager()
try:
r = http.request("GET", "http://127.0.0.1:33480/components")
except Exception as e:
print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
sys.exit(1)
apps = {}
for x in json.loads(r.data.decode("utf-8")):
latest = x.get("installedVersions", {}).get("latest", "")
if latest:
for s in x.get("settings", []):
if s.get("version", "") == latest:
root = s.get("launch", {}).get("root", "")
apps[x["slug"]] = (x["name"], root)
break
return apps
def create_link(src, dst):
print(f"Creating a link '{src}' -> '{dst}'")
packmanapi.link(src, dst)
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
parser.add_argument(
"--path",
help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
required=False,
)
parser.add_argument(
"--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
)
args = parser.parse_args()
path = args.path
if not path:
print("Path is not specified, looking for Omniverse Apps...")
apps = find_omniverse_apps()
if len(apps) == 0:
print(
"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
)
sys.exit(0)
print("\nFound following Omniverse Apps:")
for i, slug in enumerate(apps):
name, root = apps[slug]
print(f"{i}: {name} ({slug}) at: '{root}'")
if args.app:
selected_app = args.app.lower()
if selected_app not in apps:
choices = ", ".join(apps.keys())
print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
sys.exit(0)
else:
selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
if not selected_app:
selected_app = next(iter(apps))
print(f"\nSelected app: {selected_app}")
_, path = apps[selected_app]
if not os.path.exists(path):
print(f"Provided path doesn't exist: {path}")
else:
SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
create_link(f"{SCRIPT_ROOT}/../../app", path)
print("Success!")
# Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shutil
import sys
import tempfile
import zipfile
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
class TemporaryDirectory:
def __init__(self):
self.path = None
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, type, value, traceback):
# Remove temporary data created
shutil.rmtree(self.path)
def install_package(package_src_path, package_dst_path):
with zipfile.ZipFile(package_src_path, allowZip64=True) as zip_file, TemporaryDirectory() as temp_dir:
zip_file.extractall(temp_dir)
# Recursively copy (temp_dir will be automatically cleaned up on exit)
try:
# Recursive copy is needed because both package name and version folder could be missing in
# target directory:
shutil.copytree(temp_dir, package_dst_path)
except OSError as exc:
logger.warning("Directory %s already present, packaged installation aborted" % package_dst_path)
else:
logger.info("Package successfully installed to %s" % package_dst_path)
install_package(sys.argv[1], sys.argv[2])
from collections import OrderedDict
import os
from pxr import UsdGeom, Usd, Sdf
import omni.usd as usd
from ekozerski.rtxremixtools.commons import log_error
def get_selected_mesh_prims():
ctx = usd.get_context()
current_stage = ctx.get_stage()
selection = ctx.get_selection().get_selected_prim_paths()
selected_prims = {
path: current_stage.GetPrimAtPath(path)
for path in selection
}
meshes = {
prim_path: prim
for prim_path, prim in selected_prims.items()
if UsdGeom.Mesh(prim)
}
return meshes
def convert_mesh_to_vertex_interpolation_mode(mesh):
"""
    This method attempts to convert Remix meshes' interpolation mode from constant or faceVarying to vertex.
If there is any faceVarying attribute, it means the data arrays (points, uvs, normals...) will have different
lengths, so this script will copy data around using the faceVertexIndices array to ensure they all end up with the
same length.
"""
# TODO: Study interpolation modes in depth to implement a decent conversion script.
prim = mesh.GetPrim()
primvar_api = UsdGeom.PrimvarsAPI(prim)
primvars = {var for var in primvar_api.GetPrimvars()}
face_varying_primvars = [v for v in primvars if v.GetInterpolation() == UsdGeom.Tokens.faceVarying]
if face_varying_primvars or mesh.GetNormalsInterpolation() == UsdGeom.Tokens.faceVarying:
non_face_varying_primvars = list(primvars.difference(face_varying_primvars))
non_face_varying_primvars = [var for var in non_face_varying_primvars if var.GetInterpolation() != 'uniform']
indices = prim.GetAttribute("faceVertexIndices")
        # Setting points separately since it doesn't have a "SetInterpolation" like primvars.
points = prim.GetAttribute("points")
points_arr = points.Get()
new_arr = [points_arr[i] for i in indices.Get()]
points.Set(new_arr)
for var in non_face_varying_primvars:
original_arr = var.Get()
if original_arr:
new_arr = [original_arr[i] for i in indices.Get()]
var.Set(new_arr)
indices.Set([i for i in range(len(indices.Get()))])
[var.SetInterpolation(UsdGeom.Tokens.vertex) for var in primvars]
mesh.SetNormalsInterpolation(UsdGeom.Tokens.vertex)
def convert_uv_primvars_to_st(mesh):
# https://github.com/NVIDIAGameWorks/dxvk-remix/blob/ebb0ecfd638d6a32ab5f10708b5b07bc763cf79b/src/dxvk/rtx_render/rtx_mod_usd.cpp#L696
# https://github.com/Kim2091/RTXRemixTools/blob/8ae25224ef8d1d284f3e208f671b2ce6a35b82af/RemixMeshConvert/For%20USD%20Composer/RemixMeshConvert_OV.py#L4
known_uv_names = [
'primvars:st',
'primvars:uv',
'primvars:st0',
'primvars:st1',
'primvars:st2',
'primvars:UVMap',
'primvars:UVChannel_1',
'primvars:map1',
]
# Preserving the order of found primvars to use the first one, in case a primvars:st can't be found.
primvar_api = UsdGeom.PrimvarsAPI(mesh)
uv_primvars = OrderedDict(
(primvar.GetName(), primvar)
for primvar in primvar_api.GetPrimvars()
if primvar.GetTypeName().role == 'TextureCoordinate'
or primvar.GetName() in known_uv_names
)
if not uv_primvars:
return
# Picking only one UV and blowing up everything else as the runtime only reads the first anyway.
considered_uv = uv_primvars.get('primvars:st') or next(iter(uv_primvars.values()))
uv_data = considered_uv.Get()
[primvar_api.RemovePrimvar(uv_name) for uv_name in uv_primvars.keys()]
# Recreating the primvar with appropriate name, type and role
new_uv_primvar = primvar_api.CreatePrimvar('primvars:st', Sdf.ValueTypeNames.TexCoord2fArray, UsdGeom.Tokens.vertex)
new_uv_primvar.Set(uv_data)
def remove_unused_primvars(mesh):
unused_primvar_names = [
'primvars:displayColor',
'primvars:displayOpacity',
]
primvar_api = UsdGeom.PrimvarsAPI(mesh)
[primvar_api.RemovePrimvar(uv_name) for uv_name in unused_primvar_names]
def fix_meshes_in_file(usd_file_path):
stage = Usd.Stage.Open(usd_file_path)
mesh_prims = [prim for prim in stage.TraverseAll() if UsdGeom.Mesh(prim)]
for prim in mesh_prims:
faceVertices = prim.GetAttribute("faceVertexCounts").Get()
if not faceVertices or not all({x == 3 for x in faceVertices}):
log_error(f"Mesh {prim.GetPath()} in '{usd_file_path}' hasn't been triangulated and this tools doesn't do that for you yet :(")
continue
convert_mesh_to_vertex_interpolation_mode(UsdGeom.Mesh(prim))
convert_uv_primvars_to_st(UsdGeom.Mesh(prim))
remove_unused_primvars(UsdGeom.Mesh(prim))
stage.Save()
def is_a_captured_mesh(mesh):
"""
Returns True if the Mesh's defining USD file is located in the captures folder.
"""
return os.path.normpath("captures/meshes") in os.path.normpath(mesh.GetPrimStack()[-1].layer.realPath)
def fix_meshes_geometry():
meshes = {k: v for k,v in get_selected_mesh_prims().items() if not is_a_captured_mesh(v)}
for path, mesh in meshes.items():
source_layer = mesh.GetPrimStack()[-1].layer
fix_meshes_in_file(source_layer.realPath)
source_layer.Reload()
import os
from typing import List
from omni import usd, kit
from omni.kit.window.file_importer import get_file_importer
from omni.client import make_relative_url
from ekozerski.rtxremixtools.utils import find_source_mesh_hash_prim
def open_add_material_dialog_for_prim(mesh_hash, ctx, current_stage):
def create_material_from_mdl_file(filename: str, dirname: str, selections: List[str] = []):
if not filename.endswith('mdl'):
raise ValueError(f"The selected file '{filename}' doesn't have a mdl extension.")
mesh_hash_path = mesh_hash.GetPath().pathString
counter = 0
material_name = os.path.basename(filename).replace('.mdl', '')
new_material_path = mesh_hash_path + f'/{material_name}_{counter}'
while current_stage.GetPrimAtPath(new_material_path).IsValid():
counter += 1
new_material_path = mesh_hash_path + f'/{material_name}_{counter}'
        # TODO: Get material name by inspecting the MDL file rather than guessing from its name, so users can
# rename it at will.
mtl_name = 'AperturePBR_Opacity' if 'Opacity' in filename else 'AperturePBR_Translucent'
editing_layer = current_stage.GetEditTarget().GetLayer()
relative_file_path = make_relative_url(editing_layer.realPath, os.path.join(dirname, filename))
success, _ = kit.commands.execute('CreateMdlMaterialPrimCommand',
mtl_url=relative_file_path,
mtl_name=mtl_name,
mtl_path=new_material_path,
select_new_prim=True,
)
def filter_handler(filename: str, _, extension_option):
if extension_option == '.mdl':
return filename.lower().endswith('.mdl')
return True
file_importer = get_file_importer()
file_importer.show_window(
title=f'Select MDL File',
import_button_label="Select",
import_handler=create_material_from_mdl_file,
file_extension_types=[(".mdl", "Opacity or Translucent MDL file")],
file_filter_handler=filter_handler,
)
def open_add_material_dialog():
ctx = usd.get_context()
current_stage = ctx.get_stage()
selection = ctx.get_selection().get_selected_prim_paths()
selected_prims = {
path: current_stage.GetPrimAtPath(path)
for path in selection
}
source_meshes = [find_source_mesh_hash_prim(current_stage, prim) for prim in selected_prims.values()]
source_meshes = set([mesh for mesh in source_meshes if mesh is not None])
for mesh_hash in list(source_meshes):
open_add_material_dialog_for_prim(mesh_hash, ctx, current_stage)
from pxr import Usd
from omni import usd
def find_source_mesh_hash_prim(current_stage, prim):
if not current_stage.GetPrimAtPath('/RootNode/meshes'):
return prim
search_prim = prim
valid_paths = ['/RootNode/meshes', '/RootNode/instances']
while search_prim.GetParent().IsValid() and search_prim.GetParent().GetPath().pathString not in valid_paths:
search_prim = search_prim.GetParent()
if not search_prim:
return None
if 'mesh_' in Usd.Prim.GetName(search_prim):
return search_prim
_, mesh_hash, __ = Usd.Prim.GetName(search_prim).split('_')
mesh_prim_path = f'/RootNode/meshes/mesh_{mesh_hash}'
return current_stage.GetPrimAtPath(mesh_prim_path)
def find_inst_hash_prim(instance_mesh):
search_prim = instance_mesh
root_path = '/RootNode/instances'
while search_prim.GetParent().IsValid() and search_prim.GetParent().GetPath().pathString != root_path:
search_prim = search_prim.GetParent()
if not search_prim:
return None
return search_prim
from omni import usd, kit
from pxr import Sdf
from ekozerski.rtxremixtools.utils import find_source_mesh_hash_prim
def set_preserve_original_draw_call(enabled: bool = False):
ctx = usd.get_context()
current_stage = ctx.get_stage()
selection = ctx.get_selection().get_selected_prim_paths()
selected_prims = {
path: current_stage.GetPrimAtPath(path)
for path in selection
}
source_meshes = [find_source_mesh_hash_prim(current_stage, prim) for prim in selected_prims.values()]
source_meshes = set([mesh for mesh in source_meshes if mesh is not None])
for mesh_prim in source_meshes:
kit.commands.execute(
'CreateUsdAttributeCommand',
prim=mesh_prim,
attr_name='preserveOriginalDrawCall',
attr_type=Sdf.ValueTypeNames.Int,
attr_value=1 if enabled else 0
)
from omni.kit.ui import get_custom_glyph_code
from omni import usd
import omni.ui as ui
from . import mesh_utils
from . import add_model
from . import add_material
from . import preserve_draw_calls
from . import select_source_mesh
def _build_fix_mesh_geometry_menu_item():
tooltip = ''.join([
'Interpolation Mode\n',
'OBS: Operation Can\'t be undone\n',
' RTX Remix runtime only supports "vertex" interpolation mode, in which "points", "normals" and "uvs" arrays ',
'must have the same length, but DCC tools usually export the mesh using "faceVarying" interpolation mode.',
'This operation reorganizes the geometry to be compatible with the runtime. See:\n',
' "Interpolation of Geometric Primitive Variables" - https://openusd.org/dev/api/class_usd_geom_primvar.html',
'\n\nThis operation only applies for meshes inside the mods folder, not the captured ones.',
])
ui.MenuItem(
"Fix Meshes Geometry",
triggered_fn=mesh_utils.fix_meshes_geometry,
enabled=any([
not mesh_utils.is_a_captured_mesh(mesh)
for mesh in mesh_utils.get_selected_mesh_prims().values()
]),
tooltip=tooltip
)
def _build_setup_for_mesh_replacements_menu_item():
tooltip = ''.join([
"Export the original mesh to a selected location and setup the references to work within the runtime so you",
" can focus on remodeling the mesh and export back at the same location."
])
ui.MenuItem(
"Setup for Mesh Replacement",
triggered_fn=add_model.open_mesh_replacement_setup_dialog,
enabled=any([
mesh_utils.is_a_captured_mesh(mesh)
for mesh in mesh_utils.get_selected_mesh_prims().values()
]),
tooltip=tooltip
)
def _build_add_model_menu_item():
tooltip = ''.join([
"Add external authored meshes to the prim, setting up properly to work within the runtime."
])
ui.MenuItem(
"Add Model",
triggered_fn=add_model.open_add_model_dialog,
tooltip=tooltip,
enabled=bool(usd.get_context().get_selection().get_selected_prim_paths())
)
def _build_add_material_menu_item():
tooltip = ''.join([
"Add a material defined from an external MDL file to the selected prim."
])
ui.MenuItem(
"Add Material",
triggered_fn=add_material.open_add_material_dialog,
tooltip=tooltip,
enabled=bool(usd.get_context().get_selection().get_selected_prim_paths())
)
def _build_preserve_original_draw_call_menu_item():
tooltip = ''.join([
"Add a 'custom int preserveOriginalDrawCall' attribute set to '1' to the mesh_HASH prim. Used to indicate to",
" the runtime whether it should keep rendering the original mesh or not. Should be set when adding custom ",
" lights without removing the original mesh from rendering."
])
ui.MenuItem(
"Preserve",
triggered_fn=lambda: preserve_draw_calls.set_preserve_original_draw_call(True),
tooltip=tooltip,
enabled=bool(usd.get_context().get_selection().get_selected_prim_paths())
)
def _build_dont_preserve_original_draw_call_menu_item():
tooltip = ''.join([
"Add a 'custom int preserveOriginalDrawCall' attribute set to '0' to the mesh_HASH prim. Used to indicate to",
" the runtime whether it should keep rendering the original mesh or not. Should be set when adding custom ",
" lights without removing the original mesh from rendering."
])
ui.MenuItem(
"Don't Preserve",
triggered_fn=lambda: preserve_draw_calls.set_preserve_original_draw_call(False),
tooltip=tooltip,
enabled=bool(usd.get_context().get_selection().get_selected_prim_paths())
)
def _build_select_source_meshes_menu():
tooltip = ''.join([
"Selects the corresponding mesh_HASH the prim is related to."
])
ui.MenuItem(
"Select Source Mesh (Shift + F)",
triggered_fn=select_source_mesh.select_source_meshes,
tooltip=tooltip,
enabled=bool(usd.get_context().get_selection().get_selected_prim_paths())
)
def build_rtx_remix_menu(event):
icon = get_custom_glyph_code("${glyphs}/menu_create.svg")
with ui.Menu(f' {icon} RTX Remix'):
_build_fix_mesh_geometry_menu_item()
_build_setup_for_mesh_replacements_menu_item()
_build_add_model_menu_item()
_build_add_material_menu_item()
with ui.Menu(f'Original Draw Call Preservation'):
_build_preserve_original_draw_call_menu_item()
_build_dont_preserve_original_draw_call_menu_item()
_build_select_source_meshes_menu()
from omni import usd
from ekozerski.rtxremixtools.utils import find_source_mesh_hash_prim
def select_source_meshes():
ctx = usd.get_context()
current_stage = ctx.get_stage()
selection = ctx.get_selection().get_selected_prim_paths()
selected_prims = {
path: current_stage.GetPrimAtPath(path)
for path in selection
}
source_meshes = [find_source_mesh_hash_prim(current_stage, prim) for prim in selected_prims.values()]
source_meshes = set([mesh for mesh in source_meshes if mesh is not None])
paths = [mesh.GetPath().pathString for mesh in source_meshes]
selection = usd.get_context().get_selection()
selection.clear_selected_prim_paths()
selection.set_selected_prim_paths(paths, False)
# NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
# Extension for writing UI tests (simulate UI interaction)
import omni.kit.ui_test as ui_test
# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
import ekozerski.rtxremixtools
# Having a test class derived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test
class Test(omni.kit.test.AsyncTestCase):
# Before running each test
async def setUp(self):
pass
# After running each test
async def tearDown(self):
pass
# Actual test, notice it is "async" function, so "await" can be used if needed
@omni.kit.test.omni_test_registry(guid="f898a949-bacc-41f5-be56-b4eb8923f54e")
async def test_hello_public_function(self):
result = ekozerski.rtxremixtools.some_public_function(4)
self.assertEqual(result, 256)
@omni.kit.test.omni_test_registry(guid="4626d574-659f-4a85-8958-9fa8588fbce3")
async def test_window_button(self):
# Find a label in our window
label = ui_test.find("My Window//Frame/**/Label[*]")
# Find buttons in our window
add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'")
reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'")
# Click reset button
await reset_button.click()
self.assertEqual(label.widget.text, "empty")
await add_button.click()
self.assertEqual(label.widget.text, "count: 1")
await add_button.click()
self.assertEqual(label.widget.text, "count: 2")
[core]
reloadable = true
[package]
# Semantic Versioning is used: https://semver.org/
version = "0.0.2"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["Emanuel Kozerski"]
# The title and description fields are primarily for displaying extension info in UI
title = "RTX Remix Tools"
description="Simple toolkit for creating remixing assets compatible with RTX Remix runtime"
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = "https://github.com/Ekozmaster/Nvidia-Omniverse-RTX-Remix-Tools"
# One of categories for UI.
category = "Other"
# Keywords for the extension
keywords = ["Tool", "Toolkit", "Tools", "RTX", "Remix"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import ekozerski.rtxremixtools".
[[python.module]]
name = "ekozerski.rtxremixtools"
[[test]]
# Extra dependencies only to be used during test run
dependencies = [
"omni.kit.ui_test" # UI testing extension
]
# Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [0.0.3] - 2023-12-22
- "Add Model", "Add Material" and "Fix Mesh Geometry" also works when not in a capture scene now.
- Fixed somes errors when using "Fix Mesh Geometry" option in some meshes.
- Added "Shift + F" hotkey to "Select Source Mesh".
- Fixed error when using "Setup for Mesh Replacement" on captures which nests original game meshes inside a "ref" Xform.
- Added conversion of many "primvar:*" name variations for UV-related primvars to "primvars:st" while discarding extra UV maps.
- Removing unused primvars "displayColor" and "displayOpacity".
- Xforms from added models and materials now are named according to the imported file rather than Xform_HASH_x
## [0.0.2] - 2023-08-28
- Fixing relative paths converted to absolute on the "Fix Meshes Geometry" function.
- Picking best UV map available between all primvars and discarding everything else in the "Fix Meshes Geometry"
- Removing unused primvars when using the "Fix Meshes Geometry".
- Few more bugfixes.
## [0.0.1] - 2023-08-25
- Initial version
- Added "Fix Meshes Geometry" option converting interpolation mode to "vertex".
- Added "Setup for Mesh Replacement" option to export the original mesh for remodeling by external DCC tools.
- Added "Add Model" option to add external authored .USD models to the mesh_HASH prim.
- Added "Add Material" option to add MDL materials to the mesh_HASH prim.
- Added "Original Draw Call Preservation" submenu to set.
- Added "Select Source Mesh" option to quickly select the mesh_HASH prim.
ekozerski.rtxremixtools
#############################
Example of Python only extension
.. toctree::
:maxdepth: 1
README
CHANGELOG
.. automodule:: ekozerski.rtxremixtools
:platform: Windows-x86_64, Linux-x86_64
:members:
:undoc-members:
:show-inheritance:
:imported-members:
:exclude-members: contextmanager
rcervellione-nv/omni.rhinocompute/CONTRIBUTING.md
## Contribution Rules
#### Issue Tracking
* All enhancement, bugfix, or change requests must begin with the creation of a [TensorRT Issue Request](https://github.com/nvidia/TensorRT/issues).
* The issue request must be reviewed by TensorRT engineers and approved prior to code review.
#### Coding Guidelines
- All source code contributions must strictly adhere to the [TensorRT Coding Guidelines](CODING-GUIDELINES.md).
- In addition, please follow the existing conventions in the relevant file, submodule, module, and project when you add new code or when you extend/fix existing functionality.
- To maintain consistency in code formatting and style, you should also run `clang-format` on the modified sources with the provided configuration file. This applies TensorRT code formatting rules to:
- class, function/method, and variable/field naming
- comment style
- indentation
- line length
- Format git changes:
```bash
# Commit ID is optional - if unspecified, run format on staged changes.
git-clang-format --style file [commit ID/reference]
```
- Format individual source files:
```bash
# -style=file : Obtain the formatting rules from .clang-format
# -i : In-place modification of the processed file
clang-format -style=file -i -fallback-style=none <file(s) to process>
```
- Format entire codebase (for project maintainers only):
```bash
find samples plugin -iname *.h -o -iname *.c -o -iname *.cpp -o -iname *.hpp \
| xargs clang-format -style=file -i -fallback-style=none
```
- Avoid introducing unnecessary complexity into existing code so that maintainability and readability are preserved.
- Try to keep pull requests (PRs) as concise as possible:
- Avoid committing commented-out code.
- Wherever possible, each PR should address a single concern. If there are several otherwise-unrelated things that should be fixed to reach a desired endpoint, our recommendation is to open several PRs and indicate the dependencies in the description. The more complex the changes are in a single PR, the more time it will take to review those changes.
- Write commit titles using imperative mood and [these rules](https://chris.beams.io/posts/git-commit/), and reference the Issue number corresponding to the PR. Following is the recommended format for commit texts:
```
#<Issue Number> - <Commit Title>
<Commit Body>
```
- Ensure that the build log is clean, meaning no warnings or errors should be present.
- Ensure that all `sample_*` tests pass prior to submitting your code.
- All OSS components must contain accompanying documentation (READMEs) describing the functionality, dependencies, and known issues.
- See `README.md` for existing samples and plugins for reference.
- All OSS components must have an accompanying test.
- If introducing a new component, such as a plugin, provide a test sample to verify the functionality.
- To add or disable functionality:
- Add a CMake option with a default value that matches the existing behavior.
- Where entire files can be included/excluded based on the value of this option, selectively include/exclude the relevant files from compilation by modifying `CMakeLists.txt` rather than using `#if` guards around the entire body of each file.
- Where the functionality involves minor changes to existing files, use `#if` guards.
- Make sure that you can contribute your work to open source (no license and/or patent conflict is introduced by your code). You will need to [`sign`](#signing-your-work) your commit.
- Thanks in advance for your patience as we review your contributions; we do appreciate them!
#### Pull Requests
Developer workflow for code contributions is as follows:
1. Developers must first [fork](https://help.github.com/en/articles/fork-a-repo) the [upstream](https://github.com/nvidia/TensorRT) TensorRT OSS repository.
2. Git clone the forked repository and push changes to the personal fork.
```bash
git clone https://github.com/YOUR_USERNAME/YOUR_FORK.git TensorRT
# Checkout the targeted branch and commit changes
# Push the commits to a branch on the fork (remote).
git push -u origin <local-branch>:<remote-branch>
```
3. Once the code changes are staged on the fork and ready for review, a [Pull Request](https://help.github.com/en/articles/about-pull-requests) (PR) can be [requested](https://help.github.com/en/articles/creating-a-pull-request) to merge the changes from a branch of the fork into a selected branch of upstream.
* Exercise caution when selecting the source and target branches for the PR.
Note that versioned releases of TensorRT OSS are posted to `release/` branches of the upstream repo.
    * Creation of a PR kicks off the code review process.
    * At least one TensorRT engineer will be assigned for the review.
* While under review, mark your PRs as work-in-progress by prefixing the PR title with [WIP].
4. Since there is no CI/CD process in place yet, the PR will be accepted and the corresponding issue closed only after adequate testing has been completed, manually, by the developer and/or TensorRT engineer reviewing the code.
#### Signing Your Work
* We require that all contributors "sign-off" on their commits. This certifies that the contribution is your original work, or you have rights to submit it under the same license, or a compatible license.
* Any contribution which contains commits that are not Signed-Off will not be accepted.
* To sign off on a commit you simply use the `--signoff` (or `-s`) option when committing your changes:
```bash
$ git commit -s -m "Add cool feature."
```
This will append the following to your commit message:
```
Signed-off-by: Your Name <[email protected]>
```
* Full text of the DCO:
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
```
```
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or
(b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or
(c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it.
(d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved.
```
rcervellione-nv/omni.rhinocompute/README.md
# About
This is an extension designed to run in an Nvidia Omniverse application such as Create or Machinima. The extension creates a link to a Rhino.Compute Server [https://developer.rhino3d.com/guides/compute/], allowing you to run Rhino commands such as quad remesh, or to run Grasshopper files.
This is designed to be a sample to extend. There are examples for using some basic Rhino commands like volume and quad remesh, as well as running a Grasshopper script. Use this as a starting point to integrate your Grasshopper scripts and functions directly into Omniverse and create the necessary UI elements.

# Using It
- "app" - It is a folder link to the location of your *Omniverse Kit* based app.
- "exts" - is the folder where you add to extension search path. (Extension Manager -> Gear Icon -> Extension Search Path).
Open this folder using Visual Studio Code. It will suggest you install a few extensions that will make python experience better.
Look for "cerver.util.rhinocompute" extension in extension manager inside Omniverse Create and enable it. Try applying changes to any python files, it will hot-reload and you can observe results immediately.
The first time you enable it will take some time to load. this is because all of the required packages from rhino and rhino compute will be installed into your Omniverse python library via a automatic pip install.
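As a rough sketch of how the pieces fit together (the server URL matches the extension's default; the Grasshopper file path and parameter name are placeholders, not part of this repo):

```python
import compute_rhino3d.Util
import compute_rhino3d.Grasshopper as gh

# Point the client at the Rhino.Compute server this extension talks to
compute_rhino3d.Util.url = "http://localhost:6500/"

# Feed a value into a Grasshopper definition and read the results back
tree = gh.DataTree("RH_IN:count")  # placeholder input parameter name
tree.Append([0], [25])
output = gh.EvaluateDefinition("C:/gh/random_diamonds.gh", [tree])  # placeholder path
print(output["values"])
```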
# 3rd party Libraries
This project references 3rd party libraries with the following licensing
Rhino.compute
https://github.com/mcneel/compute.rhino3d/blob/master/LICENSE
Rhino3dm
https://github.com/mcneel/rhino3dm/blob/main/LICENSE
Plotly
https://github.com/plotly/plotly.py/blob/master/LICENSE.txt
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import omni.ext
import omni.ui as ui
import omni.usd
from .RhinoComputeFunctions import RhinoFunctions, GrasshopperFunctions
from .RhinoComputUtil import SaveSelectedAs3dm
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def __init__(self):
self.computeUrl="http://localhost:6500/"
self.progressbarprog = 0
self.progbarwindow = None
self.excludeLastGroupAsLayer = False
def on_startup(self, ext_id):
#print("[omni.RhinoCompute] MyExtension startup")
def serverAddrChanged(addr):
self.computeUrl = addr
self._window = ui.Window("Rhino Compute Functions", width=300, height=400)
with self._window.frame:
with ui.VStack():
ui.Label("Compute Server Address")
serverAddrUi = ui.StringField(height = 30)
serverAddrUi.model.set_value(self.computeUrl)
serverAddrUi.model.add_value_changed_fn(lambda m:serverAddrChanged(m.get_value_as_string()))
with ui.CollapsableFrame("Util Functions", height = 0):
with ui.VStack():
ui.Button("save sel as 3dm", clicked_fn=lambda: SaveSelectedAs3dm(self,"S:/test.3dm"), height=40)
ui.Button("save all as 3dm", clicked_fn=lambda: RhinoFunctions.SaveAllAs3DM_UI(self), height=40)
with ui.CollapsableFrame("Mesh Functions", height = 0):
with ui.VStack():
ui.Button("Volume", clicked_fn=lambda: RhinoFunctions.MeshVolume(self), height=40)
ui.Button("Mesh Bool Union", clicked_fn=lambda: RhinoFunctions.MeshBoolUnion(self), height=40)
ui.Button("Quad Remesh", clicked_fn=lambda: RhinoFunctions.MeshQuadRemesh(self), height=40)
ui.Button("Mesh Offset", clicked_fn=lambda: RhinoFunctions.MeshOffset(self), height=40)
with ui.CollapsableFrame("Grasshopper Functions", height = 0):
with ui.VStack():
ui.Button("Random Diamonds Script", clicked_fn=lambda: GrasshopperFunctions.randomDiamonds_UI(self), height=40)
def on_shutdown(self):
print("[omni.RhinoCompute] MyExtension shutdown")
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import compute_rhino3d.Util
import compute_rhino3d.Mesh
import compute_rhino3d.Grasshopper as gh
import rhino3dm
import json
import omni.ext
import omni.ui as ui
from pxr import Usd, UsdGeom, Gf
import omni.usd
import asyncio
def convertSelectedUsdMeshToRhino():
context = omni.usd.get_context()
stage = omni.usd.get_context().get_stage()
prims = [stage.GetPrimAtPath(m) for m in context.get_selection().get_selected_prim_paths() ]
#filter out prims that are not mesh
selected_prims = [
prim for prim
in prims
if UsdGeom.Mesh(prim)]
#setup var to hold the mesh, its name in the dict
sDict = []
#add the converted prims to the dict
for m in selected_prims:
sDict.append({"Name": m.GetName(), "Mesh":UsdMeshToRhinoMesh(m)})
return sDict
def UsdMeshToRhinoMesh(usdMesh):
#array for the mesh items
vertices = []
faces = []
#get the USD points
points = UsdGeom.Mesh(usdMesh).GetPointsAttr().Get()
#setup the items needed to deal with world and local transforms
xform_cache = UsdGeom.XformCache()
mtrx_world = xform_cache.GetLocalToWorldTransform(usdMesh)
#create the rhino mesh
mesh = rhino3dm.Mesh()
#convert the USD points to rhino points
for p in points:
world_p = mtrx_world.Transform(p)
mesh.Vertices.Add(world_p[0],world_p[1],world_p[2])
    #faces we can extend directly into the array because they are just ints
faces.extend( UsdGeom.Mesh(usdMesh).GetFaceVertexIndicesAttr().Get())
faceCount = UsdGeom.Mesh(usdMesh).GetFaceVertexCountsAttr().Get()
ct = 0
#add the face verts, USD uses a flat list of ints so we need to deal with
#3 or 4 sided faces. USD supports ngons but that is not accounted for
#ToDo: Deal with ngons
for i in range(0,len(faceCount)):
fc=faceCount[i]
        if fc == 3:
            mesh.Faces.AddFace(faces[ct], faces[ct+1], faces[ct+2])
        if fc == 4:
            mesh.Faces.AddFace(faces[ct], faces[ct+1], faces[ct+2], faces[ct+3])
ct+=fc
    #compute normals; we don't use the USD normals here, but you could
mesh.Normals.ComputeNormals()
mesh.Compact()
return mesh
def save_stage():
stage = omni.usd.get_context().get_stage()
stage.GetRootLayer().Save()
omni.client.usd_live_process()
def RhinoMeshToUsdMesh( rootUrl, meshName, rhinoMesh: rhino3dm.Mesh , primPath=None):
#get the stage
stage = omni.usd.get_context().get_stage()
# Create the geometry inside of "Root"
meshPrimPath = rootUrl + meshName
mesh = UsdGeom.Mesh.Define(stage, meshPrimPath)
# Add all of the vertices
points = []
for i in range(0,len(rhinoMesh.Vertices)):
v = rhinoMesh.Vertices[i]
points.append(Gf.Vec3f(v.X, v.Y, v.Z))
mesh.CreatePointsAttr(points)
# Calculate indices for each triangle
faceIndices = []
faceVertexCounts = []
for i in range(0, rhinoMesh.Faces.Count):
fcount=3
curf = rhinoMesh.Faces[i]
faceIndices.append(curf[0])
faceIndices.append(curf[1])
faceIndices.append(curf[2])
if curf[2] != curf[3]:
faceIndices.append(curf[3])
fcount=4
#print(f"{fcount} : {curf}")
faceVertexCounts.append(fcount)
mesh.CreateFaceVertexIndicesAttr(faceIndices)
mesh.CreateFaceVertexCountsAttr(faceVertexCounts)
# Add vertex normals
meshNormals = []
for n in rhinoMesh.Normals:
meshNormals.append(Gf.Vec3f(n.X,n.Y,n.Z))
mesh.CreateNormalsAttr(meshNormals)
def SaveRhinoFile(rhinoMeshes, path):
model = rhino3dm.File3dm()
[ model.Objects.AddMesh(m) for m in rhinoMeshes]
model.Write(path)
def SaveSelectedAs3dm(self,path):
selectedMeshes = convertSelectedUsdMeshToRhino()
meshobj = [d['Mesh'] for d in selectedMeshes]
SaveRhinoFile(meshobj, path)
def SaveAllas3DM(self, path):
#get the stage
stage = omni.usd.get_context().get_stage()
#get all prims that are meshes
meshPrims = [stage.GetPrimAtPath(prim.GetPath()) for prim in stage.Traverse() if UsdGeom.Mesh(prim)]
#make a rhino file
rhinoFile = rhino3dm.File3dm()
uniqLayers = {}
    #figure out how many elements there are (to implement a progress bar in the future)
numPrims = len(meshPrims)
curPrim = 0
#loop over all the meshes
for mp in meshPrims:
#convert from usd mesh to rhino mesh
rhinoMesh = UsdMeshToRhinoMesh(mp)
objName = mp.GetName()
rhinoAttr = rhino3dm.ObjectAttributes()
dataOnParent = False
#get the properties on the prim
bimProps = None
parentPrim = mp.GetParent()
#see if this prim has BIM properties (from revit)
if parentPrim:
bimProps = mp.GetPropertiesInNamespace("BIM")
dataOnParent = False
#see if this prims parent has BIM properties (from revit)
if not bimProps:
bimProps = parentPrim.GetPropertiesInNamespace("BIM")
dataOnParent = True
#if no bim properties just add regular ones
if not bimProps :
bimProps = mp.GetProperties()
dataOnParent = False
for p in bimProps:
try:
pName = p.GetBaseName()
var = p.Get()
rhinoAttr.SetUserString(pName, str(var))
except Exception :
pass
# get the prims path and use that to create nested layers in rhino
primpath = str(mp.GetPath())
sepPrimPath = primpath.split('/')
sepPrimPath.pop(0)
sepPrimPath.pop()
        # this will adjust the layer structure if the data is from the Revit connector
# or if you just want to prune the last group in the export dialogue
if dataOnParent or self.excludeLastGroupAsLayer:
sepPrimPath.pop()
nestedLayerName = '::'.join(sepPrimPath)
ct=0
curLayer = ""
#loop over all the prim paths to created the nested layers in rhino
for pp in sepPrimPath:
if ct == 0:
curLayer += pp
else:
curLayer += f"::{pp}"
#check if the layer exists, if not make it
if not curLayer in uniqLayers :
layer = rhino3dm.Layer()
if ct>0:
prevLayer = curLayer.split('::')
prevLayer.pop()
prevLayer = '::'.join(prevLayer)
layer.ParentLayerId = rhinoFile.Layers.FindIndex(uniqLayers[prevLayer]).Id
layer.Color = (255,255,255,255)
layer.Name = pp
idx = rhinoFile.Layers.Add(layer)
uniqLayers[curLayer]= int(idx)
ct+=1
rhinoAttr.Name = objName
#print(str(uniqLayers[nestedLayerName]))
rhinoAttr.LayerIndex = int(str(uniqLayers[nestedLayerName]))
        #add the mesh and its attributes to the Rhino file
rhinoFile.Objects.AddMesh(rhinoMesh, rhinoAttr)
curPrim += 1
self.progressbarprog = curPrim/numPrims
#save it all
rhinoFile.Write(path)
print("completed saving")
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.3"
# The title and description fields are primarily for displaying extension info in UI
title = "Rhino Compute for Omniverse"
description="Omniverse intergration with a rhino.compute server"
# Path (relative to the root) or content of readme markdown file for UI.
readme = "../../README.md"
# URL of the extension source repository.
repository = "https://github.com/rcervellione-nv/omni.rhinocompute"
# One of categories for UI.
category = "Utility"
# Keywords for the extension
keywords = ["kit", "Rhino", "Compute"]
# Icon to show in the extension manager
icon = "data/computeTerminal.png"
# Preview to show in the extension manager
preview_image = "data/CreateAndCompute.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import cerver.util.rhinocompute".
[[python.module]]
name = "cerver.util.rhinocompute"
vinjn/llm-metahuman/README.md
# LLM MetaHuman
LLM MetaHuman is an open solution for AI-powered photorealistic digital humans.
## Preparation steps
- Install [Omniverse Launcher](https://www.nvidia.com/en-us/omniverse/download/)
- Inside Omniverse Launcher, Install `Audio2Face`.
- Install [Epic Games Store](https://store.epicgames.com/en-US/)
- Inside Epic Games Store, Install Unreal Engine 5.x.
- Follow [Audio2Face to UE Live Link Plugin](https://docs.omniverse.nvidia.com/audio2face/latest/user-manual/livelink-ue-plugin.html) to connect Audio2Face to Unreal Engine.
## Launch Audio2Face headless
## Launch llm.py
## Launch Unreal Engine Metahuman
"""
This demo script shows how to send audio data to Audio2Face Streaming Audio Player via gRPC requests.
There are two options:
* Send the whole track at once using PushAudioRequest()
* Send the audio chunks sequentially in a stream using PushAudioStreamRequest()
For the second option this script emulates the stream of chunks, generated by splitting an input WAV audio file.
But in a real application such a stream of chunks may be acquired from some other streaming source:
* streaming audio via internet, streaming Text-To-Speech, etc
gRPC protocol details can be found in audio2face.proto
"""
import sys
import time
import audio2face_pb2
import audio2face_pb2_grpc
import grpc
import numpy as np
import soundfile
def push_audio_track(url, audio_data, samplerate, instance_names):
"""
This function pushes the whole audio track at once via PushAudioRequest()
PushAudioRequest parameters:
* audio_data: bytes, containing audio data for the whole track, where each sample is encoded as 4 bytes (float32)
* samplerate: sampling rate for the audio data
    * instance_names: prim path of the Audio2Face Streaming Audio Player on the stage, where to push the audio data
* block_until_playback_is_finished: if True, the gRPC request will be blocked until the playback of the pushed track is finished
The request is passed to PushAudio()
"""
block_until_playback_is_finished = True # ADJUST
for instance_name in instance_names:
with grpc.insecure_channel(url) as channel:
stub = audio2face_pb2_grpc.Audio2FaceStub(channel)
request = audio2face_pb2.PushAudioRequest()
request.audio_data = audio_data.astype(np.float32).tobytes()
request.samplerate = samplerate
request.instance_name = instance_name
request.block_until_playback_is_finished = block_until_playback_is_finished
print("Sending audio data...")
response = stub.PushAudio(request)
if response.success:
print("SUCCESS")
else:
print(f"ERROR: {response.message}")
print("Closed channel")
def push_audio_track_stream(url, audio_data, samplerate, instance_names):
"""
This function pushes audio chunks sequentially via PushAudioStreamRequest()
    The function emulates the stream of chunks, generated by splitting the input audio track.
    But in a real application such a stream of chunks may be acquired from some other streaming source.
The first message must contain start_marker field, containing only meta information (without audio data):
* samplerate: sampling rate for the audio data
    * instance_names: prim path of the Audio2Face Streaming Audio Player on the stage, where to push the audio data
* block_until_playback_is_finished: if True, the gRPC request will be blocked until the playback of the pushed track is finished (after the last message)
Second and other messages must contain audio_data field:
* audio_data: bytes, containing audio data for an audio chunk, where each sample is encoded as 4 bytes (float32)
All messages are packed into a Python generator and passed to PushAudioStream()
"""
chunk_size = samplerate // 10 # ADJUST
sleep_between_chunks = 0.04 # ADJUST
block_until_playback_is_finished = True # ADJUST
with grpc.insecure_channel(url) as channel:
print("Channel creadted")
stub = audio2face_pb2_grpc.Audio2FaceStub(channel)
for instance_name in instance_names:
def make_generator():
start_marker = audio2face_pb2.PushAudioRequestStart(
samplerate=samplerate,
instance_name=instance_name,
block_until_playback_is_finished=block_until_playback_is_finished,
)
# At first, we send a message with start_marker
yield audio2face_pb2.PushAudioStreamRequest(start_marker=start_marker)
# Then we send messages with audio_data
for i in range(len(audio_data) // chunk_size + 1):
time.sleep(sleep_between_chunks)
chunk = audio_data[i * chunk_size : i * chunk_size + chunk_size]
yield audio2face_pb2.PushAudioStreamRequest(audio_data=chunk.astype(np.float32).tobytes())
request_generator = make_generator()
print("Sending audio data...")
response = stub.PushAudioStream(request_generator)
if response.success:
print("SUCCESS")
else:
print(f"ERROR: {response.message}")
print("Channel closed")
def main():
"""
This demo script shows how to send audio data to Audio2Face Streaming Audio Player via gRPC requests.
    There are two options:
* Send the whole track at once using PushAudioRequest()
    * Send the audio chunks sequentially in a stream using PushAudioStreamRequest()
For the second option this script emulates the stream of chunks, generated by splitting an input WAV audio file.
    But in a real application such a stream of chunks may be acquired from some other streaming source:
* streaming audio via internet, streaming Text-To-Speech, etc
    gRPC protocol details can be found in audio2face.proto
"""
if len(sys.argv) < 3:
print("Format: python test_client.py PATH_TO_WAV INSTANCE_NAME")
return
# Sleep time emulates long latency of the request
sleep_time = 0.0 # ADJUST
# URL of the Audio2Face Streaming Audio Player server (where A2F App is running)
url = "localhost:50051" # ADJUST
# Local input WAV file path
audio_fpath = sys.argv[1]
# Prim path of the Audio2Face Streaming Audio Player on the stage (were to push the audio data)
instance_names = sys.argv[2:]
data, samplerate = soundfile.read(audio_fpath, dtype="float32")
# Only Mono audio is supported
if len(data.shape) > 1:
data = np.average(data, axis=1)
print(f"Sleeping for {sleep_time} seconds")
time.sleep(sleep_time)
if 0: # ADJUST
# Push the whole audio track at once
push_audio_track(url, data, samplerate, instance_names)
else:
# Emulate audio stream and push audio chunks sequentially
push_audio_track_stream(url, data, samplerate, instance_names)
if __name__ == "__main__":
main()
vinjn/llm-metahuman/audio-client/llm.py
from openai import OpenAI
from pydub import AudioSegment
import gradio as gr
import requests
import os
from litellm import completion
import time
import threading
import queue
import gradio_client as gc
# XXX: increase requests speed
# https://stackoverflow.com/a/72440253
requests.packages.urllib3.util.connection.HAS_IPV6 = False
args = None
CWD = os.getcwd()
print("CWD:", CWD)
VOICE_ACTORS = ["nova", "alloy", "echo", "fable", "onyx", "shimmer"]
def timing_decorator(func):
def wrapper(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
end_time = time.time()
elapsed_time = end_time - start_time
print(f"{func.__name__} cost: {elapsed_time:.2f} seconds.")
return result
return wrapper
class A2fInstance:
files_to_delete = []
instaces = []
def __init__(self, index) -> None:
self.SERVICE_HEALTHY = False
self.LIVELINK_SERVICE_HEALTHY = False
self.index = index
@timing_decorator
def post(self, end_point, data=None, verbose=True):
if not self.SERVICE_HEALTHY:
return None
if verbose:
print(f"++ {end_point}")
api_url = f"{self.base_url}/{end_point}"
try:
response = requests.post(api_url, json=data)
if response and response.status_code == 200:
if verbose:
print(response.json())
return response.json()
else:
if verbose:
print(f"Error: {response.status_code} - {response.text}")
return {"Error": response.status_code, "Reason": response.text}
except Exception as e:
print(e)
self.SERVICE_HEALTHY = False
return None
@timing_decorator
def get(self, end_point, data=None, verbose=True):
if not self.SERVICE_HEALTHY:
return None
if verbose:
print(f"++ {end_point}")
api_url = f"{self.base_url}/{end_point}"
try:
response = requests.get(api_url, json=data)
if response.status_code == 200:
if verbose:
print(response.json())
return response.json()
else:
if verbose:
print(f"Error: {response.status_code} - {response.text}")
return {"Error": response.status_code, "Reason": response.text}
except Exception as e:
print(e)
self.SERVICE_HEALTHY = False
return None
def player_setlooping(self, flag=True):
self.post(
"A2F/Player/SetLooping",
{"a2f_player": args.a2f_player_id, "loop_audio": flag},
)
def player_play(self):
self.post("A2F/Player/Play", {"a2f_player": args.a2f_player_id})
def player_pause(self):
self.post("A2F/Player/Pause", {"a2f_player": args.a2f_player_id})
def player_setrootpath(self, dir_path):
self.post(
"A2F/Player/SetRootPath",
{"a2f_player": args.a2f_player_id, "dir_path": dir_path},
)
def player_settrack(self, file_name):
self.post(
"A2F/Player/SetTrack",
{"a2f_player": args.a2f_player_id, "file_name": file_name},
)
def player_gettracks(self):
self.post("A2F/Player/GetTracks", {"a2f_player": args.a2f_player_id})
def player_gettime(self):
response = self.post(
"A2F/Player/GetTime", {"a2f_player": args.a2f_player_id}, False
)
if response and response["status"] == "OK":
return response["result"]
else:
return 0
def player_getrange(self):
response = self.post(
"A2F/Player/GetRange", {"a2f_player": args.a2f_player_id}, False
)
if response and response["status"] == "OK":
return response["result"]["work"]
else:
return (0, 0)
def generatekeys(self):
self.post("A2F/A2E/GenerateKeys", {"a2f_instance": args.a2f_instance_id})
def ActivateStreamLivelink(self, flag):
self.post(
"A2F/Exporter/ActivateStreamLivelink",
{"node_path": args.a2f_livelink_id, "value": flag},
)
def IsStreamLivelinkConnected(self):
response = self.post(
"A2F/Exporter/IsStreamLivelinkConnected",
{"node_path": args.a2f_livelink_id},
)
if response and response["status"] == "OK":
return response["result"]
else:
return False
def enable_audio_stream(self, flag):
self.post(
"A2F/Exporter/SetStreamLivelinkSettings",
{
"node_path": args.a2f_livelink_id,
"values": {"enable_audio_stream": flag},
},
)
def set_livelink_ports(
self,
livelink_host,
livelink_subject,
livelink_port,
livelink_audio_port,
):
self.post(
"A2F/Exporter/SetStreamLivelinkSettings",
{
"node_path": args.a2f_livelink_id,
"values": {
"livelink_host": livelink_host,
"livelink_subject": livelink_subject,
"livelink_port": livelink_port,
"audio_port": livelink_audio_port,
},
},
)
def get_preprocessing(self):
response = self.post(
"A2F/PRE/GetSettings",
{"a2f_instance": args.a2f_instance_id},
)
if response and response["status"] == "OK":
return response["result"]
else:
return {}
def set_preprocessing(self, settings):
settings["a2f_instance"] = args.a2f_instance_id
self.post("A2F/PRE/SetSettings", settings)
def get_postprocessing(self):
response = self.post(
"A2F/POST/GetSettings",
{"a2f_instance": args.a2f_instance_id},
)
if response and response["status"] == "OK":
return response["result"]
else:
return {}
def set_postprocessing(self, settings):
self.post(
"A2F/POST/SetSettings",
{"a2f_instance": args.a2f_instance_id, "settings": settings},
)
def setup(self):
self.base_url = f"http://{args.a2f_host}:{args.a2f_port+self.index}"
self.tts_voice = args.tts_voice
if self.index > 0:
# TODO: make it elegant
self.tts_voice = VOICE_ACTORS[self.index % len(VOICE_ACTORS)]
# always ping SERVICE_HEALTHY again in setup()
self.SERVICE_HEALTHY = True
self.ActivateStreamLivelink(True)
if not self.SERVICE_HEALTHY:
return
self.player_setrootpath(CWD)
self.player_setlooping(False)
self.LIVELINK_SERVICE_HEALTHY = self.IsStreamLivelinkConnected()
if not self.LIVELINK_SERVICE_HEALTHY:
return
self.enable_audio_stream(True)
self.set_livelink_ports(
args.livelink_host,
f"{args.livelink_subject}-{self.index}",
args.livelink_port + 10 * self.index,
args.livelink_audio_port + 10 * self.index,
)
pre_settings = self.get_preprocessing()
pre_settings["prediction_delay"] = 0
pre_settings["blink_interval"] = 1.5
self.set_preprocessing(pre_settings)
post_settings = self.get_postprocessing()
post_settings["skin_strength"] = 1.3
self.set_postprocessing(post_settings)
A2fInstance.instaces = []
openai_client = OpenAI()
gc_client: gc.Client = None
chat_ui: gr.ChatInterface = None
def run_single_pipeline(a2f, answer, a2f_peer=None):
global stop_current_a2f_play
if not a2f_peer:
a2f_peer = a2f
# print(answer)
mp3_file = text_to_mp3(answer, a2f.tts_voice)
wav_file = mp3_to_wav(mp3_file)
duration = a2f_peer.player_getrange()[1]
position = a2f_peer.player_gettime()
while position > 0 and position < duration:
print(position, duration)
if stop_current_a2f_play:
print("stop_current_a2f_play")
stop_current_a2f_play = False
return
time.sleep(1)
position = a2f_peer.player_gettime()
print("z")
time.sleep(1)
a2f.player_setrootpath(CWD)
a2f.player_settrack(wav_file)
# a2f_generatekeys()
a2f.player_play()
for file in A2fInstance.files_to_delete:
try:
os.remove(file)
except Exception:
pass
A2fInstance.files_to_delete.clear()
A2fInstance.files_to_delete.append(mp3_file)
A2fInstance.files_to_delete.append(wav_file)
current_speaker = -1
@timing_decorator
def run_pipeline(answer):
if args.a2f_instance_count == 1:
run_single_pipeline(A2fInstance.instaces[0], answer)
return
global current_speaker
if answer.startswith("("):
current_speaker = -1
elif answer.startswith("A:"):
current_speaker = 0
answer = answer[2:]
elif answer.startswith("B:"):
current_speaker = 1
answer = answer[2:]
if current_speaker < 0 or current_speaker >= args.a2f_instance_count:
return
a2f = A2fInstance.instaces[current_speaker]
if not a2f.SERVICE_HEALTHY:
return
run_single_pipeline(a2f, answer)
@timing_decorator
def text_to_mp3(text, voice):
response = openai_client.audio.speech.create(
model=args.tts_model,
voice=voice,
speed=args.tts_speed,
input=text,
)
timestamp = time.time()
mp3_filename = f"{timestamp}.mp3"
response.stream_to_file(mp3_filename)
return mp3_filename
@timing_decorator
def mp3_to_wav(mp3_filename):
sound = AudioSegment.from_mp3(mp3_filename)
sound = sound.set_frame_rate(22050)
wav_filename = f"{mp3_filename}.wav"
sound.export(wav_filename, format="wav")
return wav_filename
@timing_decorator
def get_completion(chat_history):
response = completion(
model=args.llm_model,
messages=chat_history,
api_base=args.llm_url,
stream=args.llm_streaming,
)
print(response)
return response
q = queue.Queue()
cleanup_queue = False
stop_current_a2f_play = False
def pipeline_worker():
while True:
print("--------------------------")
global cleanup_queue
global stop_current_a2f_play
if cleanup_queue:
while not q.empty():
item = q.get()
q.task_done()
if item == "cleanup_queue_token":
break
cleanup_queue = False
stop_current_a2f_play = True
item = q.get()
if item == "cleanup_queue_token":
continue
print(f"Begin: {item}")
run_pipeline(item)
print(f"End: {item}")
q.task_done()
def talk_to_peer(message):
if not gc_client:
return
result = gc_client.predict(
message, api_name="/chat" # str in 'Message' Textbox component
)
print(f"from peer: {result}")
# chat_ui.textbox.submit(None, [result, result])
# chat_ui.textbox.submit()
def predict(message, history):
print("==========================")
if message == "setup":
str = ""
for a2f in A2fInstance.instaces:
a2f.setup()
str += f"A2F running: {a2f.SERVICE_HEALTHY}\n"
str += f"Live Link running: {a2f.LIVELINK_SERVICE_HEALTHY}\n"
yield str
return
if message == "ping":
for a2f in A2fInstance.instaces:
a2f.post("")
a2f.get("")
yield "A2F ping"
return
if message == "redo":
for a2f in A2fInstance.instaces:
a2f.player_play()
yield "A2F redo"
return
if message == "stop":
global cleanup_queue
cleanup_queue = True
q.put("cleanup_queue_token")
yield "stopped"
return
if message.startswith("peer"):
items = message.split()
if len(items) >= 2:
gradio_port = int(items[1])
# TODO: support non localhost
args.gradio_peer_url = f"http://{args.gradio_host}:{gradio_port}/"
global gc_client
gc_client = gc.Client(args.gradio_peer_url)
yield f"I will chat with another llm-metahuman: {args.gradio_peer_url}"
return
history_openai_format = []
for human, assistant in history:
history_openai_format.append({"role": "user", "content": human})
history_openai_format.append({"role": "assistant", "content": assistant})
history_openai_format.append({"role": "user", "content": message})
# start_time = time.time()
response = get_completion(history_openai_format)
yield ".."
# global cleanup_queue
# cleanup_queue = True
# q.put("cleanup_queue_token")
if args.llm_streaming:
# create variables to collect the stream of chunks
UNUSED_collected_chunks = []
collected_messages = []
complete_sentences = ""
# iterate through the stream of events
for chunk in response:
# chunk_time = (
# time.time() - start_time
# ) # calculate the time delay of the chunk
UNUSED_collected_chunks.append(chunk) # save the event response
chunk_message = chunk.choices[0].delta.content # extract the message
if not chunk_message:
continue
collected_messages.append(chunk_message) # save the message
# print(
# f"Message {chunk_time:.2f} s after request: {chunk_message}"
# ) # print the delay and text
print(chunk_message)
if chunk_message in [
".",
"!",
"?",
"。",
"!",
"?",
] or chunk_message.endswith("\n"):
# if not chunk_message or "\n" in chunk_message:
one_sentence = "".join([m for m in collected_messages if m is not None])
if len(one_sentence) < 10:
# ignore short sentences
continue
collected_messages = []
complete_sentences += one_sentence
q.put(one_sentence)
# run_pipeline(one_sentence)
yield complete_sentences
talk_to_peer(one_sentence)
# print the time delay and text received
# print(f"Full response received {chunk_time:.2f} seconds after request")
# # clean None in collected_messages
# collected_messages = [m for m in collected_messages if m is not None]
# full_reply_content = "".join([m for m in collected_messages])
# print(f"Full conversation received: {full_reply_content}")
# yield full_reply_content
else:
if len(response.choices[0].message.content) == 0:
return
answer = response.choices[0].message.content
yield answer
run_pipeline(answer)
def main():
import argparse
parser = argparse.ArgumentParser(description="llm.py arguments")
# gradio settings
parser.add_argument("--a2f_instance_count", type=int, default=1)
parser.add_argument("--gradio_host", default="localhost")
parser.add_argument("--gradio_port", type=int, default=7860)
parser.add_argument(
"--gradio_peer_url",
default=None,
help="the gradio peer that this gradio instance will chat with. Default value is None, which means chat with a human.",
)
# llm / litellm settings
parser.add_argument("--llm_engine", default="gpt", choices=["gpt", "llama2"])
parser.add_argument(
"--llm_model", default=None, help="https://docs.litellm.ai/docs/providers"
)
parser.add_argument("--llm_url", default=None)
parser.add_argument(
"--llm_streaming", default=True, action=argparse.BooleanOptionalAction
)
# audio2face settings
parser.add_argument("--a2f_host", default="localhost")
parser.add_argument("--a2f_port", default=8011, type=int)
parser.add_argument("--a2f_instance_id", default="/World/audio2face/CoreFullface")
parser.add_argument("--a2f_player_id", default="/World/audio2face/Player")
parser.add_argument("--a2f_livelink_id", default="/World/audio2face/StreamLivelink")
# tts settings
parser.add_argument("--tts_model", default="tts-1", choices=["tts-1", "tts-1-hd"])
parser.add_argument("--tts_speed", default=1.1, type=float)
# livelink settings
parser.add_argument("--livelink_host", default="localhost")
parser.add_argument("--livelink_port", default=12030, type=int)
parser.add_argument("--livelink_subject", default="Audio2Face")
parser.add_argument("--livelink_audio_port", default=12031, type=int)
parser.add_argument(
"--tts_voice",
default="nova",
choices=VOICE_ACTORS,
help="https://platform.openai.com/docs/guides/text-to-speech",
)
global args
args = parser.parse_args()
if not args.llm_model:
if args.llm_engine == "gpt":
args.llm_model = args.llm_model or "gpt-3.5-turbo"
elif args.llm_engine == "llama2":
args.llm_model = args.llm_model or "ollama/llama2"
args.llm_url = args.llm_url or "http://localhost:11434"
threading.Thread(target=pipeline_worker, daemon=True).start()
for i in range(args.a2f_instance_count):
a2f = A2fInstance(i)
a2f.setup()
A2fInstance.instaces.append(a2f)
global chat_ui
chat_ui = gr.ChatInterface(
predict,
title=f"llm-metahuman @{args.gradio_port}",
examples=["hello", "tell me 3 jokes", "what's the meaning of life?"],
)
chat_ui.queue().launch(server_name=args.gradio_host, server_port=args.gradio_port)
q.join()
if __name__ == "__main__":
main()
import pyttsx3
engine = pyttsx3.init() # object creation
""" RATE"""
rate = engine.getProperty("rate") # getting details of current speaking rate
print(rate) # printing current voice rate
engine.setProperty("rate", 125) # setting up new voice rate
"""VOLUME"""
volume = engine.getProperty(
"volume"
) # getting to know current volume level (min=0 and max=1)
print(volume) # printing current volume level
engine.setProperty("volume", 1.0) # setting up volume level between 0 and 1
"""VOICE"""
voices = engine.getProperty("voices") # getting details of current voice
print(voices)
engine.setProperty("voice", voices[0].id) # changing index, changes voices. o for male
# engine.setProperty('voice', voices[1].id) #changing index, changes voices. 1 for female
engine.say("Hello World!")
engine.say("说什么 current speaking rate is " + str(rate))
engine.runAndWait()
engine.stop()
"""Saving Voice to a file"""
# On linux make sure that 'espeak' and 'ffmpeg' are installed
engine.save_to_file("Hello World", "test.mp3")
engine.runAndWait()
import random
import gradio as gr
def alternatingly_agree(message, history):
if len(history) % 2 == 0:
return f"Yes, I do think that '{message}'"
else:
return "I don't think so"
count = 0
def textbox_update(chatui_textbox):
global count
count += 1
if count % 10 == 0:
return "z"
else:
return chatui_textbox
if __name__ == "__main__":
with gr.ChatInterface(alternatingly_agree) as chat_ui:
chat_ui.textbox.change(
textbox_update,
chat_ui.textbox,
chat_ui.textbox,
every=1,
trigger_mode="once",
)
chat_ui.launch()
vinjn/llm-metahuman/audio-client/ref/portal.py
import gradio as gr
def task1(input_text):
return "Task 1 Result: " + input_text
def task2(input_image):
return "Task 2 Result"
def task3(input_image):
return "Task 2 Result"
# interface one
iface1 = gr.Interface(
fn=task1, inputs="text", outputs="text", title="Multi-Page Interface"
)
# interface two
iface2 = gr.Interface(
fn=task2, inputs="image", outputs="text", title="Multi-Page Interface"
)
tts_examples = [
"I love learning machine learning",
"How do you do?",
]
tts_demo = gr.load(
"huggingface/facebook/fastspeech2-en-ljspeech",
title=None,
examples=tts_examples,
description="Give me something to say!",
cache_examples=False,
)
stt_demo = gr.load(
"huggingface/facebook/wav2vec2-base-960h",
title=None,
inputs="mic",
description="Let me try to guess what you're saying!",
)
demo = gr.TabbedInterface(
[iface1, iface2, tts_demo, stt_demo],
["Text-to-text", "image-to-text", "Text-to-speech", "Speech-to-text"],
)
# Run the interface
demo.launch(share=True)
import math
import gradio as gr
import plotly.express as px
import numpy as np
plot_end = 2 * math.pi
def get_plot(period=1):
global plot_end
x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)
y = np.sin(2*math.pi*period * x)
fig = px.line(x=x, y=y)
plot_end += 2 * math.pi
if plot_end > 1000:
plot_end = 2 * math.pi
return fig
with gr.Blocks() as demo:
with gr.Row():
with gr.Column():
gr.Markdown("Change the value of the slider to automatically update the plot")
period = gr.Slider(label="Period of plot", value=1, minimum=0, maximum=10, step=1)
plot = gr.Plot(label="Plot (updates every half second)")
dep = demo.load(get_plot, None, plot, every=1)
period.change(get_plot, period, plot, every=1, cancels=[dep])
if __name__ == "__main__":
demo.queue().launch()
mnaskret/omni-tetGen/README.md
# omni-tetGen
An omniverse extension to generate soft body meshes

## Description:
omni-tetGen uses the famous tetgen mesh generator developed by Hang Si to create tetrahedral and edge meshes for soft body simulation. The extension provides a user-friendly drag-and-drop mechanism for input mesh data in standard .obj format. It then runs the Python tetgen wrapper to create meshes, which are converted to numpy arrays and described with additional information such as edge rest lengths or tetrahedra volumes. The generated mesh is added to the stage with additional attributes:
- edge
- edgesRestLengths
- elem
- tetrahedronsRestVolumes
- inverseMasses

## PBD .ogn node
Additionally, an Omniverse node with a simple Position Based Dynamics (PBD) algorithm implemented with CUDA kernels is attached in order to test the generated meshes.

## Usage
- [Install Omniverse](https://www.nvidia.com/en-us/omniverse/) with, e.g., the Create app
- Go to: Window -> Extensions -> Gear icon -> Add extension search path: `git://github.com/mnaskret/omni-tetGen.git?branch=main`
- Find Tetrahedralizer in the list of extensions and turn it on (preferably with autoload)
- In the Tetrahedralizer window you can drop any .obj file from Omniverse Content browser, choose preferred options and generate a cool mesh
- Add a graph with the PBDBasicGravity node or create your own node that utilizes the mesh's extra attributes (a short sketch of reading them follows below)
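A short sketch of reading those attributes back from the generated prim (the prim path below is hypothetical; the attribute names are the ones listed above):

```python
import numpy as np
import omni.usd

stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/generatedMesh")  # hypothetical prim path

# Extra attributes written by the extension onto the generated mesh prim.
edges = np.array(prim.GetAttribute("edge").Get())
edge_rest_lengths = np.array(prim.GetAttribute("edgesRestLengths").Get())
tets = np.array(prim.GetAttribute("elem").Get())
tet_rest_volumes = np.array(prim.GetAttribute("tetrahedronsRestVolumes").Get())
inverse_masses = np.array(prim.GetAttribute("inverseMasses").Get())

# A PBD-style solver would use edges/edge_rest_lengths for distance constraints,
# tets/tet_rest_volumes for volume constraints, and weight position corrections
# by inverse_masses.
print(edges.shape, edge_rest_lengths.shape, tets.shape, tet_rest_volumes.shape)
```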
mnaskret/omni-tetGen/config/extension.toml
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
# The title and description fields are primarily for displaying extension info in UI
title = "Tetrahedralizer"
description="Generates a tetrahedral mesh from an external triangle mesh."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Example"
# Keywords for the extension
keywords = ["kit", "example"]
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import mnresearch.tetgen".
[[python.module]]
name = "mnresearch.tetgen"
[python.pipapi]
requirements = ['numpy', 'pxr', 'pyvista', 'tetgenExt==0.6.dev0', 'warp']
use_online_index = true
"""
Dynamically import every file in a directory tree that looks like a Python Ogn Node.
This includes linked directories, which is the mechanism by which nodes can be hot-reloaded from the source tree.
"""
import omni.graph.core as og
og.register_ogn_nodes(__file__, "mnresearch.tetgen")
"""====== GENERATED BY omni.graph.tools - DO NOT EDIT ======"""
import omni.graph.tools as ogt
ogt.import_tests_in_directory(__file__, __name__)
Kim2091/RTXRemixTools/README.md
# RTXRemixTools
These are some tools I've made that are intended for use with Nvidia's RTX Remix. Right now I have 3:
* **MagicUSDA** - Allows you to generate .usda files based on your gameReadyAssets folder
* **LightAdjuster** - A simple script that allows you to adjust light intensity and color temperature in a specified .usda file
* **RemixMeshConvert** - This script will convert meshes to be (more) compatible with Remix
These should hopefully help with setting up mods for Remix quickly and easily.
import argparse
def adjust_value(line, value_name, percentage, log_changes, i):
if f'float {value_name} =' in line:
parts = line.split('=')
old_value = float(parts[1].strip())
new_value = old_value * percentage
new_line = f'{parts[0]}= {new_value}\n'
if log_changes:
log_line = f'Line {i + 1}: {line.strip()} -> {new_line.strip()}'
print(log_line)
with open('changes.log', 'a') as log:
log.write(log_line + '\n')
line = new_line
return line, True
return line, False
def adjust_file(file_path, start_line=1, log_changes=False, adjust_intensity=False, adjust_color_temperature=False, percentage=None):
with open(file_path, 'r') as file:
data = file.readlines()
lines_changed = 0
with open(file_path, 'w') as file:
for i, line in enumerate(data):
if i + 1 >= start_line:
if adjust_intensity:
line, changed = adjust_value(line, 'intensity', percentage, log_changes, i)
if changed:
lines_changed += 1
if adjust_color_temperature:
line, changed = adjust_value(line, 'colorTemperature', percentage, log_changes, i)
if changed:
lines_changed += 1
file.write(line)
print(f'Completed! {lines_changed} lines changed.')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Adjust the intensity and/or color temperature values in a file.')
parser.add_argument('file_path', type=str, help='The path to the file to modify.')
parser.add_argument('-s', '--start-line', type=int, default=1, help='The line number to start modifying at.')
parser.add_argument('-l', '--log', action='store_true', help='Whether to print a log of the changed lines.')
parser.add_argument('-ai', '--adjust-intensity', action='store_true', help='Whether to adjust the intensity value.')
parser.add_argument('-act', '--adjust-color-temperature', action='store_true', help='Whether to adjust the color temperature value.')
parser.add_argument('-p', '--percentage', type=float, required=True, help='The percentage to adjust the value by.')
args = parser.parse_args()
adjust_file(args.file_path, args.start_line, args.log, args.adjust_intensity, args.adjust_color_temperature, args.percentage)
Kim2091/RTXRemixTools/LightAdjuster/README.md
# **Remix Light Adjuster**
*Written with the assistance of Bing*
This script adjusts the intensity and/or color temperature values in a file.
$\color{#f7d26a}{\textsf{Please back up your usda files before running!}}$
## Usage
To use this script, run the following command:
`python LightAdjuster.py file_path`
where `file_path` is the path to the .usda file to modify.
There are several additional options that can be used with this script:
* `-s` or `--start-line` - This option allows you to specify the line number to start modifying at. The default value is 1.
* `-l` or `--log` - This option enables logging of the changed lines. If this option is used, a log of the changed lines will be printed to the console and written to a file named `changes.log`.
* `-p` or `--percentage` - This option specifies the percentage to adjust the value by. This option is required.
* `-ai` or `--adjust-intensity` - This option enables adjustment of the intensity value using `-p`.
* `-act` or `--adjust-color-temperature` - This option enables adjustment of the color temperature value using `-p`.
For example, to adjust the intensity value in a file named `data.txt`, starting at line 5, and logging the changes, you would run the following command:
`python LightAdjuster.py data.txt -s 5 -l -ai -p 0.5`
This would adjust the intensity value in all lines containing `float intensity =`, starting at line 5, by multiplying it by 0.5. A log of the changed lines would be printed to the console and written to a file named `changes.log`.
## Description
This script reads the specified file and modifies lines that contain either `float intensity =` or `float colorTemperature =`, depending on which value is being adjusted. The value is multiplied by the specified percentage and the line is updated with the new value. If logging is enabled, a log of the changed lines is printed to the console and written to a file named `changes.log`.
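For example, with `-p 0.5` a line such as `float intensity = 120` is rewritten to `float intensity = 60.0` (the input value here is just an illustration): the text after the `=` is parsed as a float, multiplied by the given percentage, and written back onto the same line.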
After all lines have been processed, the script prints a message indicating how many lines were changed.
Kim2091/RTXRemixTools/MagicUSDA/README.md
# Remix USDA Generator
*Written with the assistance of Bing and ChatGPT*
$\color{#f7d26a}{\textsf{Please back up your usda files to a separate folder before running!}}$
This is a script to generate `.usda` files from your gameReadyAssets folder. It detects any of these map types in your folder:
- emissive
- normal
- metallic
- rough
## Usage
How to use this script:
`python MagicUSDA.py -d path\to\gameReadyAssets`
There are some additional functions:
* `-o` - Change the output usda file names.
* `-m` - Split the output USDA files into separate entries for each map type (e.g. mod_emissive.usda, mod_metallic.usda). Works with `-o` to change the base file name.
* `-a` - Add sublayers made with `-m` to the mod.usda file. Not compatible with custom files specified by `-o`, will only modify mod.usda. Works with `-m` and `-o`.
* `-g` - Toggle generating hashes for file names before the suffix. Useful for files with generic names like test.dds. Diffuse textures must be identical to Remix dumps.
* `-s` - Change between the AperturePBR_Opacity and AperturePBR_Translucent material shader types. Using this, you can generate separate .usda files for normal or translucent objects easily
* `-r` _**Currently broken**_ - Specify a separate folder to use as a reference for generating diffuse texture hashes. Searches for files in the reference directory based on file names from the base directory. If not provided, uses the main directory to generate hashes. Useful with folders like captures or game texture rips.
The `.usda` files generated by this script serve to replace textures in your Remix games, allowing you to swap out textures and utilize additional map types to enhance the game's visuals.
This script is intended to be used with original diffuse textures, which are required for it to function correctly. It generates a `mod.usda` file for use in your game through Remix. It was designed with [chaiNNer](https://chainner.app/) in mind, however you can use this with any textures you've created. Be aware that this script will overwrite any pre-existing `mod.usda` files in your directory!
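Putting the flags above together, an illustrative invocation that generates one file per map type, hashes generic file names, and registers the resulting layers as sublayers of `mod.usda` would be: `python MagicUSDA.py -d path\to\gameReadyAssets -m -g -a`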
Kim2091/RTXRemixTools/MagicUSDA/MagicUSDA.py
import os
import argparse
import xxhash
from pxr import Usd, UsdGeom, UsdShade, Sdf
suffixes = ["_normal", "_emissive", "_metallic", "_rough"]
def generate_hashes(file_path) -> str:
# Read the file and extract the raw data. Thanks @BlueAmulet!
with open(file_path, "rb") as file:
data = file.read(128)
dwHeight = int.from_bytes(data[12:16], "little")
dwWidth = int.from_bytes(data[16:20], "little")
pfFlags = int.from_bytes(data[80:84], "little")
pfFourCC = data[84:88]
bitCount = int.from_bytes(data[88:92], "little")
mipsize = dwWidth * dwHeight
if pfFlags & 0x4: # DDPF_FOURCC
if pfFourCC == b"DXT1": # DXT1 is 4bpp
mipsize //= 2
elif pfFlags & 0x20242: # DDPF_ALPHA | DDPF_RGB | DDPF_YUV | DDPF_LUMINANCE
mipsize = mipsize * bitCount // 8
# Read the required portion of the file for hash calculation
with open(file_path, "rb") as file:
file.seek(128) # Move the file pointer to the appropriate position
data = file.read(mipsize)
hash_value = xxhash.xxh3_64(data).hexdigest()
return hash_value.upper()
def write_usda_file(args, file_list, suffix=None) -> [list, list]:
created_files = []
modified_files = []
game_ready_assets_path = os.path.join(args.directory)
# Check if there are any texture files with the specified suffix
if suffix:
has_suffix_files = False
for file_name in file_list:
if file_name.endswith(f"{suffix}.dds"):
has_suffix_files = True
break
if not has_suffix_files:
# return a blank set
            return [modified_files, created_files]
usda_file_name = f'{args.output}{suffix if suffix else ""}.usda'
usda_file_path = os.path.join(game_ready_assets_path, usda_file_name)
if os.path.exists(usda_file_path):
modified_files.append(usda_file_path)
else:
created_files.append(usda_file_path)
targets = {}
reference_directory = args.reference_directory if args.reference_directory else args.directory
for file_name in file_list:
if file_name.endswith(".dds"):
# Extract only the file name from the absolute path
name = os.path.basename(file_name)
name, ext = os.path.splitext(name)
if "_" not in name or name.endswith("_diffuse") or name.endswith("_albedo"):
# Check if the generate_hashes argument is specified
if args.generate_hashes:
key = name.split("_")[0] # Use the prefix of the diffuse file name as the key
hash_value = generate_hashes(os.path.join(reference_directory, file_name)) # Generate hash for the diffuse file
else:
key = os.path.basename(name)
hash_value = key # Use the original name as the hash value
# Check if the key contains a hash or ends with _diffuse or _albedo
if not (key.isupper() and len(key) == 16) and not (key.endswith("_diffuse") or key.endswith("_albedo")):
continue
# Remove the _diffuse or _albedo suffix from the key and hash_value
key = key.replace("_diffuse", "").replace("_albedo", "")
hash_value = hash_value.replace("_diffuse", "").replace("_albedo", "")
# Get the relative path from the game ready assets path to the texture file
rel_file_path = os.path.relpath(file_name, args.directory)
targets[key] = (rel_file_path, hash_value)
# Create a new stage
stage = Usd.Stage.CreateNew(usda_file_path)
# Modify the existing RootNode prim
root_node_prim = stage.OverridePrim("/RootNode")
# Add a Looks scope as a child of the RootNode prim
looks_scope = UsdGeom.Scope.Define(stage, "/RootNode/Looks")
added_targets = set()
for value, (rel_file_path, hash_value) in targets.items():
# Check if there is a corresponding texture file for the specified suffix
if suffix and not any(
file_name.endswith(f"{value}{suffix}.dds") for file_name in file_list
): continue
if value in added_targets:
continue
else:
added_targets.add(value)
print(f"Adding texture {rel_file_path} with hash: {hash_value}")
# Add a material prim as a child of the Looks scope
material_prim = UsdShade.Material.Define(
stage, f"/RootNode/Looks/mat_{hash_value.upper()}"
)
material_prim.GetPrim().GetReferences().SetReferences([])
# Set the shader attributes
shader_prim = UsdShade.Shader.Define(
stage, f"/RootNode/Looks/mat_{hash_value.upper()}/Shader"
)
shader_prim.GetPrim().CreateAttribute("info:mdl:sourceAsset", Sdf.ValueTypeNames.Asset).Set(
f"{args.shader_type}.mdl"
)
shader_prim.GetPrim().CreateAttribute("info:implementationSource", Sdf.ValueTypeNames.Token).Set(
"sourceAsset"
)
shader_prim.GetPrim().CreateAttribute("info:mdl:sourceAsset:subIdentifier", Sdf.ValueTypeNames.Token).Set(
f"{args.shader_type}"
)
shader_output = shader_prim.CreateOutput("output", Sdf.ValueTypeNames.Token)
if not suffix or suffix == "_diffuse" or suffix == "_albedo":
diffuse_texture = shader_prim.CreateInput(
"diffuse_texture", Sdf.ValueTypeNames.Asset
)
# Use the dynamically generated relative path for the diffuse texture
diffuse_texture.Set(f".\{rel_file_path}")
# Process each type of texture
if not suffix or suffix == "_emissive":
emissive_file_name = f"{value}_emissive.dds"
# print(f"Emissive File Name: {emissive_file_name in file_list}")
# print(file_list)
if any(file_path.endswith(emissive_file_name) for file_path in file_list):
emissive_mask_texture = shader_prim.CreateInput(
"emissive_mask_texture", Sdf.ValueTypeNames.Asset
)
# Use the dynamically generated relative path for the emissive texture
emissive_rel_file_path = os.path.relpath(os.path.join(os.path.dirname(file_name), emissive_file_name), args.directory)
emissive_mask_texture.Set(f".\{emissive_rel_file_path}")
enable_emission = shader_prim.CreateInput(
"enable_emission", Sdf.ValueTypeNames.Bool
)
enable_emission.Set(True)
emissive_intensity = shader_prim.CreateInput(
"emissive_intensity", Sdf.ValueTypeNames.Float
)
emissive_intensity.Set(5)
if not suffix or suffix == "_metallic":
metallic_file_name = f"{value}_metallic.dds"
if any(file_path.endswith(metallic_file_name) for file_path in file_list):
metallic_texture = shader_prim.CreateInput(
"metallic_texture", Sdf.ValueTypeNames.Asset
)
# Use the dynamically generated relative path for the metallic texture
metallic_rel_file_path = os.path.relpath(os.path.join(os.path.dirname(file_name), metallic_file_name), args.directory)
metallic_texture.Set(f".\{metallic_rel_file_path}")
if not suffix or suffix == "_normal":
normal_file_name = f"{value}_normal.dds"
if any(file_path.endswith(normal_file_name) for file_path in file_list):
normalmap_texture = shader_prim.CreateInput(
"normal_texture", Sdf.ValueTypeNames.Asset
)
# Use the dynamically generated relative path for the normal texture
normal_rel_file_path = os.path.relpath(os.path.join(os.path.dirname(file_name), normal_file_name), args.directory)
normalmap_texture.Set(f".\{normal_rel_file_path}")
if not suffix or suffix == "_rough":
roughness_file_name = f"{value}_rough.dds"
if any(file_path.endswith(roughness_file_name) for file_path in file_list):
reflectionroughness_texture = shader_prim.CreateInput(
"reflectionroughness_texture", Sdf.ValueTypeNames.Asset
)
# Use the dynamically generated relative path for the roughness texture
roughness_rel_file_path = os.path.relpath(os.path.join(os.path.dirname(file_name), roughness_file_name), args.directory)
reflectionroughness_texture.Set(f".\{roughness_rel_file_path}")
# Connect shader output to material inputs
material_prim.CreateInput(
"mdl:displacement", Sdf.ValueTypeNames.Token
).ConnectToSource(shader_output)
material_prim.CreateInput(
"mdl:surface", Sdf.ValueTypeNames.Token
).ConnectToSource(shader_output)
material_prim.CreateInput(
"mdl:volume", Sdf.ValueTypeNames.Token
).ConnectToSource(shader_output)
# Save the stage
stage.Save()
return [modified_files, created_files]
def add_sublayers(args, file_list) -> list:
modified_files = []
game_ready_assets_path = os.path.join(args.directory)
mod_file_path = os.path.join(game_ready_assets_path, "mod.usda")
if os.path.exists(mod_file_path):
modified_files.append(mod_file_path)
# Open the existing stage
stage = Usd.Stage.Open(mod_file_path)
# Get the existing sublayers
existing_sublayers = list(stage.GetRootLayer().subLayerPaths)
# Create a set of existing sublayer file names
existing_sublayer_files = {
os.path.basename(sublayer_path) for sublayer_path in existing_sublayers
}
# Add new sublayers
new_sublayers = [
f"./{args.output}{suffix}.usda"
for suffix in suffixes
if f"{args.output}{suffix}.usda" not in existing_sublayer_files
and any(
os.path.basename(file_path) == f"{args.output}{suffix}.usda"
for file_path in file_list
)
]
stage.GetRootLayer().subLayerPaths = (existing_sublayers + new_sublayers)
# Save the stage
stage.Save()
return modified_files
if __name__ == "__main__":
# ARGUMENT BLOCK
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--directory", required=True, help="Path to directory")
parser.add_argument("-o", "--output", default="mod", help="Output file name")
parser.add_argument("-g", "--generate-hashes", action="store_true", help="Generates hashes for file names before the suffix")
parser.add_argument("-m", "--multiple-files", action="store_true", help="Save multiple .usda files, one for each suffix type (except for diffuse)")
parser.add_argument("-a", "--add-sublayers", action="store_true", help="Add sublayers made with -m to the mod.usda file. This argument only modifies the mod.usda file and does not affect any custom USDA file specified by the -o argument.")
parser.add_argument("-s", "--shader-type", default="AperturePBR_Opacity", choices=["AperturePBR_Opacity", "AperturePBR_Translucent"], help="Shader type")
parser.add_argument("-r", "--reference-directory", help="Path to reference directory for diffuse texture hashes")
args = parser.parse_args()
# Check target processing directory before use
if not os.path.isdir(args.directory):
raise FileNotFoundError("Specified processing directory (-d) is invalid")
# Recursively scan folders
file_list = []
for root, dirs, files in os.walk(args.directory):
for file in files:
file_list.append(os.path.join(root, file))
created_files = []
modified_files = []
# Process sublayer additions
print(f"Add Sublayers: {args.add_sublayers}")
if args.add_sublayers:
modified_files.extend(add_sublayers(args, file_list))
# Generate unique USDA files per suffix type (except diffuse)
if args.multiple_files:
for suffix in suffixes:
m, c = write_usda_file(args, file_list, suffix)
modified_files.extend(m), created_files.extend(c)
else: # Generate a single USDA file for all suffixes
m, c = write_usda_file(args, file_list)
modified_files.extend(m), created_files.extend(c)
# Complete
print("Finished!")
print("Created files:")
for file in created_files:
print(f" - {file}")
print("Modified files:")
for file in modified_files:
print(f" - {file}")
import argparse
import logging
import os
import shutil
import sys
from pxr import Usd, UsdGeom, Gf, Sdf
ALIASES = {
"primvars:UVMap": ("primvars:st", Sdf.ValueTypeNames.Float2Array),
"primvars:UVChannel_1": ("primvars:st1", Sdf.ValueTypeNames.Float2Array),
"primvars:map1": ("primvars:st1", Sdf.ValueTypeNames.Float2Array),
# Add more aliases here
}
def convert_face_varying_to_vertex_interpolation(usd_file_path):
stage = Usd.Stage.Open(usd_file_path)
mesh_prims = [prim for prim in stage.TraverseAll() if prim.IsA(UsdGeom.Mesh)]
for prim in mesh_prims:
mesh = UsdGeom.Mesh(prim)
indices = prim.GetAttribute("faceVertexIndices")
points = prim.GetAttribute("points")
if not indices or not points:
continue # Skip if the required attributes are missing
points_arr = points.Get()
modified_points = [points_arr[i] for i in indices.Get()]
points.Set(modified_points)
indices.Set([i for i in range(len(indices.Get()))])
mesh.SetNormalsInterpolation(UsdGeom.Tokens.vertex)
primvar_api = UsdGeom.PrimvarsAPI(prim)
for var in primvar_api.GetPrimvars():
if var.GetInterpolation() == UsdGeom.Tokens.faceVarying:
var.SetInterpolation(UsdGeom.Tokens.vertex)
# Replace aliases with "float2[] primvars:st"
if var.GetName() in ALIASES:
new_name, new_type_name = ALIASES[var.GetName()]
new_var = primvar_api.GetPrimvar(new_name)
if new_var:
new_var.Set(var.Get())
else:
new_var = primvar_api.CreatePrimvar(new_name, new_type_name)
new_var.Set(var.Get())
new_var.SetInterpolation(UsdGeom.Tokens.vertex) # Set interpolation to vertex
primvar_api.RemovePrimvar(var.GetBaseName())
return stage
def process_folder(input_folder, output_folder, output_extension=None):
for file_name in os.listdir(input_folder):
input_file = os.path.join(input_folder, file_name)
if output_extension:
file_name = os.path.splitext(file_name)[0] + '.' + output_extension
output_file = os.path.join(output_folder, file_name)
if not os.path.isfile(input_file):
continue
shutil.copy(input_file, output_file) # Make a copy of the input file and rename it to the output file
stage = convert_face_varying_to_vertex_interpolation(output_file)
stage.Save() # Modify the output file in place
logging.info(f"Processed file: {input_file} -> {output_file}")
def main():
parser = argparse.ArgumentParser(description='Convert USD file formats and interpolation of meshes.')
parser.add_argument('input', type=str, help='Input file or folder path')
parser.add_argument('output', type=str, help='Output file or folder path')
parser.add_argument('-f', '--format', type=str, choices=['usd', 'usda'], help='Output file format (usd or usda)')
args = parser.parse_args()
input_path = args.input
output_path = args.output
output_extension = args.format
logging.basicConfig(level=logging.INFO, format='%(message)s')
if os.path.isdir(input_path):
process_folder(input_path, output_path, output_extension)
else:
if output_extension:
output_path = os.path.splitext(output_path)[0] + '.' + output_extension
shutil.copy(input_path, output_path) # Make a copy of the input file and rename it to the output file
stage = convert_face_varying_to_vertex_interpolation(output_path)
stage.Save() # Modify the output file in place
logging.info(f"Processed file: {input_path} -> {output_path}")
if __name__ == '__main__':
main()
Kim2091/RTXRemixTools/RemixMeshConvert/README.md
## RemixMeshConvert
$\color{#f7d26a}{\textsf{Use this instead. It integrates directly into Omniverse:}}$ https://github.com/Ekozmaster/NvidiaOmniverseRTXRemixTools
<details>
<summary>Old description:</summary>
*Based on a script originally written by E-man*
$\color{#f7d26a}{\textsf{Please back up your USD and USDA files before running!}}$
**How to use this script:**
To convert a single file:
`python RemixMeshConvert.py [input.usda] [output.usda]`
To batch convert a folder:
`python RemixMeshConvert.py path\to\input\folder path\to\output\folder -f [usd or usda]`
**Arguments:**
`-f` `--output-format` - This controls the output format when using the script in **batch** mode
**Description:**
This script takes USD files as input, makes a copy named after the output path, converts the interpolation of all meshes in each USD file from face-varying to vertex, and finally saves the modified stages to the new USD files. It can process a single file or a folder of files, and also includes a dictionary of aliases for replacing specific primvar names with `float2[] primvars:st1`.
**For your final exports to use in-game, please save as USD! USDA files are very inefficient in comparison**
Please refer to `requirements.txt` for necessary Python libraries.
</details>
import omni.usd
from pxr import Usd, UsdGeom, Sdf

ALIASES = {
    "primvars:UVMap": ("primvars:st", Sdf.ValueTypeNames.Float2Array),
    "primvars:UVChannel_1": ("primvars:st1", Sdf.ValueTypeNames.Float2Array),
    "primvars:map1": ("primvars:st1", Sdf.ValueTypeNames.Float2Array),
    # Add more aliases here
}


def convert_face_varying_to_vertex_interpolation(stage):
    mesh_prims = [prim for prim in stage.TraverseAll() if prim.IsA(UsdGeom.Mesh)]
    for prim in mesh_prims:
        mesh = UsdGeom.Mesh(prim)
        indices = prim.GetAttribute("faceVertexIndices")
        points = prim.GetAttribute("points")
        if not indices or not points:
            continue  # Skip if the required attributes are missing

        points_arr = points.Get()
        modified_points = [points_arr[i] for i in indices.Get()]
        points.Set(modified_points)
        indices.Set([i for i in range(len(indices.Get()))])

        mesh.SetNormalsInterpolation(UsdGeom.Tokens.vertex)

        primvar_api = UsdGeom.PrimvarsAPI(prim)
        for var in primvar_api.GetPrimvars():
            if var.GetInterpolation() == UsdGeom.Tokens.faceVarying:
                var.SetInterpolation(UsdGeom.Tokens.vertex)

            # Replace aliases with "float2[] primvars:st"
            if var.GetName() in ALIASES:
                new_name, new_type_name = ALIASES[var.GetName()]
                new_var = primvar_api.GetPrimvar(new_name)
                if new_var:
                    new_var.Set(var.Get())
                else:
                    new_var = primvar_api.CreatePrimvar(new_name, new_type_name)
                    new_var.Set(var.Get())
                new_var.SetInterpolation(UsdGeom.Tokens.vertex)  # Set interpolation to vertex

                # Remove the old primvar directly from the UsdGeomPrimvar object
                var.GetAttr().Block()
    return stage


stage = omni.usd.get_context().get_stage()
convert_face_varying_to_vertex_interpolation(stage)
## RemixMeshConvert
*Based on a script originally written by E-man*
$\color{#f7d26a}{\textsf{Please back up your USD and USDA files before running!}}$
**How to use this script:**
* Install USD Composer: https://www.nvidia.com/en-us/omniverse/apps/create/
* Once launched, open the Script Editor in Window > Script Editor
* Load your mesh files by dragging them into the pane on the right
* Run the script
For more information, look at [this thread](https://discord.com/channels/1028444667789967381/1096847508002590760/1123306156773879928) in the [RTX Remix Showcase server](https://discord.gg/rtxremix)
**Description:**
The RemixMeshConvert_OV script is intended for use only within Omniverse's USD Composer. If you want to process files and folders independently of Omniverse, use RemixMeshConvert in the directory above this one.
**For your final exports to use in-game, please save as USD! USDA files are very inefficient in comparison**
---
description: Come learn how to generate photorealistic images in Nvidia Replicator and build object detection model using Edge Impulse.
---
# The Unreasonable Effectiveness of Synthetic Data
Created By:
[George Igwegbe](https://www.linkedin.com/in/george-igwegbe/)
Public Project Link:
[GitHub](https://github.com/gigwegbe/synthetic_data_with_nvidia_replicator_and_edge_impulse) | [Edge Impulse](https://studio.edgeimpulse.com/public/187851/latest)

## Introduction
Building an object detection model can be tricky since it requires a large dataset. Sometimes, data is scarce or not diverse enough to train a robust model. Synthetic data offers an alternative way to generate well-represented datasets for building a quality model. By applying domain randomization, we developed photorealistic datasets, trained a neural network, and validated the model using real datasets. To create a diverse dataset, we created a variety of simulated environments with randomized properties: changing lighting conditions, camera position, and material textures. We also show that synthetic, randomized datasets can help a model generalize to the real-world environment.
## Story
We wanted to replicate the [object detection](https://www.youtube.com/watch?v=Vwv0PJPeC4s) work by Louis Moreau, but this time using synthetic data rather than real data. The project aims to demonstrate how to build and deploy the Edge Impulse object detection model using synthetic datasets generated by Nvidia Omniverse Replicator. The Replicator is an Nvidia Omniverse extension that provides means of generating physically accurate synthetic data.
## Why Synthetic Data?
Computer vision tasks such as classification, object detection, and segmentation require large-scale datasets. Data collected from real-world applications tends to be narrow and not very diverse: it is often gathered in a single environment and changes little over time. In addition, data collected from a single domain tends to contain few examples of tail-end scenarios and rare events, and we cannot easily reproduce these situations in the real world.
Andrej Karpathy's presentation - (source: Tesla AI Day, 2021) |
--- |
 |
Consequently, models trained in a single domain are brittle and often fail when deployed in another environment, requiring yet another training cycle to adapt. This raises the question: how can we efficiently and cheaply collect generalized data across several domains? A simple, unreasonably effective solution is Domain Randomization, which varies the texture and colour of the foreground objects, the background image, the number and pose of the lights in the scene, the camera position, and so on. Domain randomization can further improve the variability of synthetic data for rare events generated in the simulator.
> “The purpose of domain randomization is to provide enough simulated variability at training time such that at test time the model is able to generalize to real-world data.” - Tobin et al, Domain Randomization for Transferring Deep Neural Networks from Simulation to the Real World, 2017
Domain Randomization for Transferring Deep Neural Networks - (source: Tobin et al, 2017) |
--- |
 |
Nvidia Replicator enables us to perform domain randomization. The Replicator is one module within the Omniverse family, and it offers tools and workflows to generate data for various computer vision and non-visual tasks. It is a highly interoperable tool that integrates with more than 40 modelling/rendering applications across different verticals. This seamless integration is possible thanks to Pixar's Universal Scene Description (USD), which serves as a common protocol that lets applications such as Blender, 3ds Max, Maya, Revit and C4D work with the Nvidia Replicator.
## Data-Centric Workflow
The traditional machine learning workflow is often model-centric, focusing on model development by iteratively improving the algorithm design. In this project, we chose a data-centric approach, where we fixed the model and iteratively improved the quality of the generated dataset. This approach is more robust since a model is only as good as its dataset. The method systematically engineers the data to improve performance on an AI task; at its core, it is thinking about ML in terms of data, not the model.
Data generation and model building workflow |
--- |
 |
## Requirements
- Nvidia Omniverse Replicator
- Edge Impulse Studio
- Logitech Webcam HD Pro - C920
### Hardware and Driver Setup
Nvidia Omniverse Replicator is a computation-intensive application requiring a moderately sized GPU and decent RAM. Our hardware setup consists of 32GB of RAM, 1TB of storage and an 8GB GPU with an Intel i9 processor.
Hardware Specification | Hardware Specification
--- | ---
 | 
The application can run on both Windows and Linux. For this experiment, we used the Ubuntu 20.04 LTS distro, given that Ubuntu 18.04 is no longer supported by Nvidia Omniverse as of November 2022. In addition, we selected the appropriate Nvidia driver, v510.108.03, and installed it on the Linux machine.
Software Specification | Software Specification
--- | ---
 | 
## Experiment Setup and Data Generation
The environment for the experiment consists of movable and immovable objects (dynamically and statically positioned objects). The immovable objects are the lights, the table and two cameras, while the movable objects are the cutlery: a spoon, a fork and a knife. We use domain randomization to alter the properties of some of the movable and immovable objects. Assets, which include objects and scenes, are represented in the Replicator as USD.
Experimental Setup |
--- |
 |
Every object in Omniverse Replicator is represented as USD. A 3D model file in formats such as OBJ, FBX, or glTF can be imported into the Replicator using Nvidia Omniverse's CAD Importer extension, which converts the 3D files into USD. We imported our assets (table, knife, spoon and fork) into the simulator by specifying the path of each asset.
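As a sketch of how such a conversion can be scripted rather than done through the UI, the snippet below uses the Kit asset converter extension; the module name `omni.kit.asset_converter`, its task API, and the file paths are assumptions for illustration, not part of this project's code:

```python
import asyncio
import omni.kit.asset_converter as converter


async def convert_to_usd(input_path, output_path):
    # Create a conversion task (e.g. Knife.fbx -> Knife.usd) and wait for it to finish.
    task = converter.get_instance().create_converter_task(input_path, output_path, None)
    success = await task.wait_until_finished()
    if not success:
        print(f"Conversion failed: {task.get_status()} {task.get_error_message()}")


# Hypothetical paths; adjust to wherever your source meshes and USD assets live.
asyncio.ensure_future(convert_to_usd("assets/Knife.fbx", "asset/Collected_Knife/Knife.usd"))
```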
Rectangular Light | Dome Light
--- | --- |
 | 
Lighting plays a crucial role in data generation. There are several built-in light types in the Nvidia Replicator. We chose two rectangular lights and a dome light since they give us better lighting options and capabilities for generating photorealistic images. The rectangular light emulates light emitted from a panel, and the dome light lets you light the entire scene dynamically. We randomized light parameters such as temperature and intensity, both sampled from a <strong>normal distribution</strong>. In addition, the scale parameter was sampled from a <strong>uniform distribution</strong> while the rotation and position of the lights were kept fixed.
```python
# Lighting setup for rectangular light and dome light
def rect_lights(num=2):
    lights = rep.create.light(
        light_type="rect",
        temperature=rep.distribution.normal(6500, 500),
        intensity=rep.distribution.normal(0, 5000),
        position=(-131, 150, -134),
        rotation=(-90, 0, 0),
        scale=rep.distribution.uniform(50, 100),
        count=num
    )
    return lights.node


def dome_lights(num=1):
    lights = rep.create.light(
        light_type="dome",
        temperature=rep.distribution.normal(6500, 500),
        intensity=rep.distribution.normal(0, 1000),
        position=(0, 0, 0),
        rotation=(270, 0, 0),
        count=num
    )
    return lights.node
```
For the table, we fixed its position and rotation, selected the tabletop material, chose an additional <strong>Mahogany</strong> material, and alternated between the two materials during the data generation process.
```python
# Import and position the table object
def table():
    table = rep.create.from_usd(TABLE_USD, semantics=[('class', 'table')])
    with table:
        rep.modify.pose(
            position=(-135.39745, 0, -140.25696),
            rotation=(0, -90, -90),
        )
    return table
```
To improve our dataset's quality further, we chose two cameras of different resolutions, which we strategically positioned in various locations within the scene. In addition, we varied the position of the cameras in a different version of the data generation process.
```python
# Set up multiple cameras and attach them to render products
camera = rep.create.camera(focus_distance=focus_distance, focal_length=focal_length, position=cam_position, rotation=cam_rotation, f_stop=f_stop)
camera2 = rep.create.camera(focus_distance=focus_distance2, focal_length=focal_length2, position=cam_position2, rotation=cam_rotation, f_stop=f_stop)

# Will render 1024x1024 images and 512x512 images
render_product = rep.create.render_product(camera, (1024, 1024))
render_product2 = rep.create.render_product(camera2, (512, 512))
```
Finally, for the movable objects (the knife, spoon and fork), we ensured that they could only translate within the bounds of the table, choosing a bounding region on the tabletop inside which the objects were allowed to translate and rotate. Position and rotation were sampled from a uniform distribution, while the number of movable objects generated at each iteration was kept at five.
```python
# Define randomizer function for CUTLERY assets.
def cutlery_props(size=5):
    instances = rep.randomizer.instantiate(rep.utils.get_usd_files(current_cultery), size=size, mode='point_instance')
    with instances:
        rep.modify.pose(
            position=rep.distribution.uniform((-212, 76.2, -187), (-62, 76.2, -94)),
            rotation=rep.distribution.uniform((-90, -180, 0), (-90, 180, 0)),
        )
    return instances.node
```
At this juncture, we have instantiated all objects in our scene. We can now run the randomizer to generate 50 images at each synthetic generation cycle.
```python
# Register randomization
with rep.trigger.on_frame(num_frames=50):
    rep.randomizer.table()
    rep.randomizer.rect_lights(1)
    rep.randomizer.dome_lights(1)
    rep.randomizer.cutlery_props(5)

# Run the simulation graph
rep.orchestrator.run()
```
To ensure we generated photorealistic images, we switched to <strong>RTX Interactive (Path Tracing)</strong> mode, which gives high-fidelity renderings.
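For reference, the render mode can also be switched from the Replicator script itself rather than the viewport UI. The snippet below is a minimal sketch; the helper names `set_render_pathtraced` and `set_render_rtx_realtime` are assumptions based on the Replicator settings API and may differ between Omniverse versions:

```python
import omni.replicator.core as rep

# Assumption: Replicator exposes settings helpers for switching the active renderer.
# Path tracing is slower but produces the photorealistic frames used for this dataset.
rep.settings.set_render_pathtraced(samples_per_pixel=64)

# For quick previews, real-time ray tracing can be restored:
# rep.settings.set_render_rtx_realtime()
```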
Data generation process |
--- |
 |
## Data Distribution and Model Building
Data Distribution of different items |
--- |
 |
Following the data-centric philosophy, we generated three versions of the dataset. The first version, <strong>V1</strong>, consists of images generated with the objects normal to the camera; <strong>V2</strong> contains images generated at an angle of 60 degrees to the camera with a mahogany tabletop; and <strong>V3</strong> comprises images normal to the camera with the cutlery suspended in space.
V1 - Normal to the object |
--- |
 |
<table>
<tr>
<td>V2 - Angled to the object</td>
<td>V3 - Normal to the object and object suspended in space</td>
</tr>
<tr>
<td valign="top"><img src="media_assets/v2.avif"></td>
<td valign="top"><img src="media_assets/v3.avif"></td>
</tr>
</table>
<table>
<tr>
<td>Generated Dataset - V2</td>
<td>Generated Dataset - V3</td>
</tr>
<tr>
<td valign="top"><img src="media_assets/generated_dataset.avif"></td>
<td valign="top"><img src="media_assets/generated_dataset2.avif"></td>
</tr>
</table>
## Edge Impulse: Data Annotation and Model Building
<table>
<tr>
<td>Data Labeler </td>
<td>Data Annotation</td>
</tr>
<tr>
<td><img src="media_assets/annotating_image.png"></td>
<td><img src="media_assets/image_in_queue.png"></td>
</tr>
</table>
We uploaded the generated images to Edge Impulse Studio, where we annotated the dataset into the different classes. We carefully annotated each dataset version and trained with the <strong>YOLOv5</strong> object detection model. We tried input sizes of 320, 512 and 1024 pixels before settling on <strong>320</strong>. Edge Impulse provided an excellent version control system for models, which enabled us to track model performance across different dataset versions and hyperparameters.
<table>
<tr>
<td>Create Impulse</td>
<td>Generate Feature </td>
</tr>
<tr>
<td><img src="media_assets/building_model.png"></td>
<td><img src="media_assets/feature_extraction.png"></td>
</tr>
</table>
Version Control in Edge Impulse |
--- |
 |
### Testing of Object Detection Models with Real Objects
We used the Edge Impulse CLI tool to evaluate the model's accuracy by downloading, building and running the model locally. A Logitech C920 webcam streamed live video of objects placed on a table 50 cm to 80 cm from the camera, with the camera position kept fixed during the experiment. The clips below show that the model trained on V1 does not generalize well to real-world objects, so we needed to improve it by uploading, annotating and training with the V2 dataset.
V1 failure - model failed to identify objects |
--- |
 |
We observed improved performance when the model was trained with the V2 dataset. It could identify the various objects distinctly, although it still failed when we changed the objects' orientations. To mitigate this, we trained the model with the remaining V3 dataset and increased other hyperparameters, raising the number of epochs from 500 to 2000. We also tested the object detector on real objects with different background textures, and the model performed well in these conditions.
V2 success - model can identify objects |
--- |
 |
V2 failure - model failed to identify objects in different orientations |
--- |
 |
After several cycles of iterating over various hyperparameters, we got a model that generalizes well across different orientations.
V3 success - model can identify objects in different orientations |
--- |
 |
V3 success - model can identify different materials |
--- |
 |
The core idea behind the data-centric approach to solving ML problems is to create more data around the failure points of the model. We improved the model by iteratively improving the data generation, especially in areas where the model had previously failed.

## Conclusion
In this work, we learned how the domain randomization approach helps generate quality and well-generalized datasets for the object detection task. We also demonstrated the effectiveness of data-centric machine learning workflow in improving the model performance. Although this work is restricted to visual problems, we can extend domain randomization to other sensors such as lidar, accelerometer, and ultrasonic sensors.
## Reference
- [Project on Edge Impulse](https://studio.edgeimpulse.com/public/187851/latest)
- [Introduction to Replicator](https://docs.omniverse.nvidia.com/app_code/prod_extensions/ext_replicator.html)
- [Introduction to USD](https://developer.nvidia.com/usd#usdnvidia)
- [Tesla AI Day](https://youtu.be/j0z4FweCy4M?t=5727)
- [Domain Randomization for Transferring Deep Neural Networks](https://arxiv.org/pdf/1703.06907.pdf)
- [Understanding Domain Randomization for SIM-TO-REAL Transfer](https://arxiv.org/pdf/2110.03239.pdf)
### Synthetic data with Nvidia replicator and Edge Impulse

- Fixed position
- Fixed camera (not randomized)
- Fixed lighting and light parameters
- Changed background materials
import omni.replicator.core as rep

with rep.new_layer():
    # Load in assets
    local_path = "/home/george/Documents/synthetic_data_with_nvidia_replicator_and_edge_impulse/"
    TABLE_USD = f"{local_path}/asset/Collected_EastRural_Table/EastRural_Table.usd"
    SPOON_SMALL_USD = f"{local_path}/asset/Collected_Spoon_Small/Spoon_Small.usd"
    SPOON_BIG_USD = f"{local_path}/asset/Collected_Spoon_Big/Spoon_Big.usd"
    FORK_SMALL_USD = f"{local_path}/asset/Collected_Fork_Small/Fork_Small.usd"
    FORK_BIG_USD = f"{local_path}/asset/Collected_Fork_Big/Fork_Big.usd"
    KNIFE_USD = f"{local_path}/asset/Collected_Knife/Knife.usd"

    # Camera parameters
    cam_position = (-131, 200, -134)
    cam_position2 = (-131, 120, -134)
    cam_position_random = rep.distribution.uniform((0, 181, 0), (0, 300, 0))
    cam_rotation = (-60, 0, 0)  # (-45, 0, 0)
    focus_distance = 120
    focus_distance2 = 72
    focal_length = 19.1
    focal_length2 = 7.5
    f_stop = 1.8
    f_stop2 = 1.8
    focus_distance_random = rep.distribution.normal(500.0, 100)

    # Cutlery path
    current_cultery = SPOON_SMALL_USD  # Change the item here, e.g. KNIFE_USD
    output_path = current_cultery.split(".")[0].split("/")[-1]

    def rect_lights(num=2):
        lights = rep.create.light(
            light_type="rect",
            temperature=rep.distribution.normal(6500, 500),
            intensity=rep.distribution.normal(0, 5000),
            position=(-131, 150, -134),
            rotation=(-90, 0, 0),
            scale=rep.distribution.uniform(50, 100),
            count=num
        )
        return lights.node

    def dome_lights(num=1):
        lights = rep.create.light(
            light_type="dome",
            temperature=rep.distribution.normal(6500, 500),
            intensity=rep.distribution.normal(0, 1000),
            position=(0, 0, 0),
            rotation=(270, 0, 0),
            count=num
        )
        return lights.node

    def table():
        table = rep.create.from_usd(TABLE_USD, semantics=[('class', 'table')])
        with table:
            rep.modify.pose(
                position=(-135.39745, 0, -140.25696),
                rotation=(0, -90, -90),
            )
        return table

    # Define randomizer function for CUTLERY assets. This randomization includes placement and rotation of the assets on the surface.
    def cutlery_props(size=15):
        instances = rep.randomizer.instantiate(rep.utils.get_usd_files(current_cultery), size=size, mode='point_instance')
        with instances:
            rep.modify.pose(
                position=rep.distribution.uniform((-212, 76.2, -187), (-62, 76.2, -94)),
                rotation=rep.distribution.uniform((-90, -180, 0), (-90, 180, 0)),
            )
        return instances.node

    # Register randomization
    rep.randomizer.register(table)
    rep.randomizer.register(cutlery_props)
    rep.randomizer.register(rect_lights)
    rep.randomizer.register(dome_lights)

    # Set up multiple cameras and attach them to render products
    camera = rep.create.camera(focus_distance=focus_distance, focal_length=focal_length, position=cam_position, rotation=cam_rotation, f_stop=f_stop)
    camera2 = rep.create.camera(focus_distance=focus_distance2, focal_length=focal_length2, position=cam_position2, rotation=cam_rotation, f_stop=f_stop)

    # Will render 1024x1024 images and 512x512 images
    render_product = rep.create.render_product(camera, (1024, 1024))
    render_product2 = rep.create.render_product(camera2, (512, 512))

    # Initialize and attach writer
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(output_dir=f"{local_path}/data/normal_60/{output_path}", rgb=True, bounding_box_2d_tight=False, semantic_segmentation=False)
    writer.attach([render_product, render_product2])

    with rep.trigger.on_frame(num_frames=50):
        rep.randomizer.table()
        rep.randomizer.rect_lights(1)
        rep.randomizer.dome_lights(1)
        rep.randomizer.cutlery_props(15)

    # Run the simulation graph
    rep.orchestrator.run()
mati-nvidia/window-menu-add/README.md
# Window Menu Add
An example extension showing how to create a window and add it to the `Window` menu so that it can be shown and hidden
using the menu item in the `Window` menu.
## App Link Setup
If the `app` folder link doesn't exist or is broken, it can be created again. For a better developer experience it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from *Omniverse Launcher*. A convenience script is included.
Run:
```
> link_app.bat
```
If successful you should see `app` folder link in the root of this repo.
If multiple Omniverse apps are installed, the script will select the recommended one. Or you can explicitly pass an app:
```
> link_app.bat --app create
```
You can also just pass a path to create the link to:
```
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4"
```
## Sharing Your Extensions
This folder is ready to be pushed to any git repository. Once pushed, a direct link to the git repository can be added to the *Omniverse Kit* extension search paths.
A link might look like this: `git://github.com/[user]/[your_repo].git?branch=main&dir=exts`
Notice that `exts` is the repo subfolder containing the extensions. More information can be found in the "Git URL as Extension Search Paths" section of the developer manual.
To add a link to your *Omniverse Kit* based app, go into: Extension Manager -> Gear Icon -> Extension Search Path
import os
import argparse
import sys
import json
import packmanapi
import urllib3


def find_omniverse_apps():
    http = urllib3.PoolManager()
    try:
        r = http.request("GET", "http://127.0.0.1:33480/components")
    except Exception as e:
        print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
        sys.exit(1)

    apps = {}
    for x in json.loads(r.data.decode("utf-8")):
        latest = x.get("installedVersions", {}).get("latest", "")
        if latest:
            for s in x.get("settings", []):
                if s.get("version", "") == latest:
                    root = s.get("launch", {}).get("root", "")
                    apps[x["slug"]] = (x["name"], root)
                    break
    return apps


def create_link(src, dst):
    print(f"Creating a link '{src}' -> '{dst}'")
    packmanapi.link(src, dst)


APP_PRIORITIES = ["code", "create", "view"]

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
    parser.add_argument(
        "--path",
        help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
        required=False,
    )
    parser.add_argument(
        "--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
    )
    args = parser.parse_args()

    path = args.path
    if not path:
        print("Path is not specified, looking for Omniverse Apps...")
        apps = find_omniverse_apps()
        if len(apps) == 0:
            print(
                "Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
            )
            sys.exit(0)

        print("\nFound following Omniverse Apps:")
        for i, slug in enumerate(apps):
            name, root = apps[slug]
            print(f"{i}: {name} ({slug}) at: '{root}'")

        if args.app:
            selected_app = args.app.lower()
            if selected_app not in apps:
                choices = ", ".join(apps.keys())
                print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
                sys.exit(0)
        else:
            selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
            if not selected_app:
                selected_app = next(iter(apps))

        print(f"\nSelected app: {selected_app}")
        _, path = apps[selected_app]

    if not os.path.exists(path):
        print(f"Provided path doesn't exist: {path}")
    else:
        SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
        create_link(f"{SCRIPT_ROOT}/../../app", path)
        print("Success!")
# Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zipfile
import tempfile
import sys
import shutil

__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")


class TemporaryDirectory:
    def __init__(self):
        self.path = None

    def __enter__(self):
        self.path = tempfile.mkdtemp()
        return self.path

    def __exit__(self, type, value, traceback):
        # Remove temporary data created
        shutil.rmtree(self.path)


def install_package(package_src_path, package_dst_path):
    with zipfile.ZipFile(
        package_src_path, allowZip64=True
    ) as zip_file, TemporaryDirectory() as temp_dir:
        zip_file.extractall(temp_dir)
        # Recursively copy (temp_dir will be automatically cleaned up on exit)
        try:
            # Recursive copy is needed because both package name and version folder could be missing in
            # target directory:
            shutil.copytree(temp_dir, package_dst_path)
        except OSError as exc:
            logger.warning(
                "Directory %s already present, packaged installation aborted" % package_dst_path
            )
        else:
            logger.info("Package successfully installed to %s" % package_dst_path)


# Usage: python install_package.py <package_zip_path> <install_destination_path>
install_package(sys.argv[1], sys.argv[2])
import carb
import omni.ext
import omni.kit.ui

from .window import MyCustomWindow, WINDOW_TITLE


class WindowMenuAddExtension(omni.ext.IExt):
    def on_startup(self, ext_id):
        carb.log_info("[maticodes.example.window.add] WindowMenuAddExtension startup")

        # Note the "Window" part of the path that directs the new menu item to the "Window" menu.
        self._menu_path = f"Window/{WINDOW_TITLE}"
        self._window = None
        self._menu = omni.kit.ui.get_editor_menu().add_item(self._menu_path, self._on_menu_click, True)

    def on_shutdown(self):
        carb.log_info("[maticodes.example.window.add] WindowMenuAddExtension shutdown")
        omni.kit.ui.get_editor_menu().remove_item(self._menu)
        if self._window is not None:
            self._window.destroy()
            self._window = None

    def _on_menu_click(self, menu, toggled):
        """Handles showing and hiding the window from the 'Windows' menu."""
        if toggled:
            if self._window is None:
                self._window = MyCustomWindow(WINDOW_TITLE, self._menu_path)
            else:
                self._window.show()
        else:
            if self._window is not None:
                self._window.hide()
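For context, `extension.py` imports `MyCustomWindow` and `WINDOW_TITLE` from a companion `window.py` module that is not shown above. Below is a hypothetical minimal sketch of what such a module could look like; the window title, widget contents, and menu-sync behaviour are assumptions for illustration, not the repository's actual code:

```python
import omni.kit.ui
import omni.ui as ui

# Assumption: the real module defines its own title string.
WINDOW_TITLE = "Window Menu Add"


class MyCustomWindow(ui.Window):
    def __init__(self, title, menu_path):
        super().__init__(title, width=300, height=200)
        self._menu_path = menu_path
        # Keep the Window menu checkbox in sync if the user closes the window directly.
        self.set_visibility_changed_fn(self._on_visibility_changed)
        with self.frame:
            ui.Label("This window is toggled from the Window menu")

    def _on_visibility_changed(self, visible):
        omni.kit.ui.get_editor_menu().set_value(self._menu_path, visible)

    def show(self):
        self.visible = True

    def hide(self):
        self.visible = False
```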