
Commit

onp => np
brentyi committed Sep 24, 2024
1 parent 7ac568d commit 05308dd
Showing 56 changed files with 678 additions and 698 deletions.
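This commit renames the NumPy import alias used throughout the documentation examples from onp to the conventional np. As a minimal before/after sketch of the pattern (adapted from the 01_image example below; the noise variable name is added here only for illustration):

# Before: NumPy imported under the onp alias.
import numpy as onp

noise = onp.random.randint(0, 256, size=(400, 400, 3), dtype=onp.uint8)

# After: the standard np alias.
import numpy as np

noise = np.random.randint(0, 256, size=(400, 400, 3), dtype=np.uint8)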
9 changes: 2 additions & 7 deletions docs/source/examples/01_image.rst
@@ -20,7 +20,7 @@ NeRFs), or images to render as 3D textures.
from pathlib import Path
import imageio.v3 as iio
-import numpy as onp
+import numpy as np
import viser
@@ -47,12 +47,7 @@ NeRFs), or images to render as 3D textures.
while True:
server.scene.add_image(
"/noise",
-onp.random.randint(
-0,
-256,
-size=(400, 400, 3),
-dtype=onp.uint8,
-),
+np.random.randint(0, 256, size=(400, 400, 3), dtype=np.uint8),
4.0,
4.0,
format="jpeg",
16 changes: 8 additions & 8 deletions docs/source/examples/02_gui.rst
@@ -15,7 +15,7 @@ Examples of basic GUI elements that we can create, read from, and write to.
import time
-import numpy as onp
+import numpy as np
import viser
@@ -96,8 +96,8 @@ Examples of basic GUI elements that we can create, read from, and write to.
print(file.name, len(file.content), "bytes")
# Pre-generate a point cloud to send.
-point_positions = onp.random.uniform(low=-1.0, high=1.0, size=(5000, 3))
-color_coeffs = onp.random.uniform(0.4, 1.0, size=(point_positions.shape[0]))
+point_positions = np.random.uniform(low=-1.0, high=1.0, size=(5000, 3))
+color_coeffs = np.random.uniform(0.4, 1.0, size=(point_positions.shape[0]))
counter = 0
while True:
@@ -111,11 +111,11 @@ Examples of basic GUI elements that we can create, read from, and write to.
# connected clients.
server.scene.add_point_cloud(
"/point_cloud",
-points=point_positions * onp.array(gui_vector3.value, dtype=onp.float32),
+points=point_positions * np.array(gui_vector3.value, dtype=np.float32),
colors=(
-onp.tile(gui_rgb.value, point_positions.shape[0]).reshape((-1, 3))
+np.tile(gui_rgb.value, point_positions.shape[0]).reshape((-1, 3))
* color_coeffs[:, None]
-).astype(onp.uint8),
+).astype(np.uint8),
position=gui_vector2.value + (0,),
point_shape="circle",
)
@@ -131,8 +131,8 @@ Examples of basic GUI elements that we can create, read from, and write to.
# Update the number of handles in the multi-slider.
if gui_slider_positions.value != len(gui_multi_slider.value):
-gui_multi_slider.value = onp.linspace(
-0, 100, gui_slider_positions.value, dtype=onp.int64
+gui_multi_slider.value = np.linspace(
+0, 100, gui_slider_positions.value, dtype=np.int64
)
counter += 1
6 changes: 3 additions & 3 deletions docs/source/examples/03_gui_callbacks.rst
@@ -16,7 +16,7 @@ we get updates.
import time
-import numpy as onp
+import numpy as np
from typing_extensions import assert_never
import viser
@@ -86,8 +86,8 @@ we get updates.
num_points = gui_num_points.value
server.scene.add_point_cloud(
"/frame/point_cloud",
-points=onp.random.normal(size=(num_points, 3)),
-colors=onp.random.randint(0, 256, size=(num_points, 3)),
+points=np.random.normal(size=(num_points, 3)),
+colors=np.random.randint(0, 256, size=(num_points, 3)),
)
# We can (optionally) also attach callbacks!
8 changes: 4 additions & 4 deletions docs/source/examples/05_camera_commands.rst
@@ -16,7 +16,7 @@ corresponding client automatically.
import time
-import numpy as onp
+import numpy as np
import viser
import viser.transforms as tf
@@ -32,12 +32,12 @@ corresponding client automatically.
When a frame is clicked, we move the camera to the corresponding frame.
"""
-rng = onp.random.default_rng(0)
+rng = np.random.default_rng(0)
def make_frame(i: int) -> None:
# Sample a random orientation + position.
wxyz = rng.normal(size=4)
-wxyz /= onp.linalg.norm(wxyz)
+wxyz /= np.linalg.norm(wxyz)
position = rng.uniform(-3.0, 3.0, size=(3,))
# Create a coordinate frame and label.
@@ -52,7 +52,7 @@ corresponding client automatically.
)
T_world_target = tf.SE3.from_rotation_and_translation(
tf.SO3(frame.wxyz), frame.position
-) @ tf.SE3.from_translation(onp.array([0.0, 0.0, -0.5]))
+) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5]))
T_current_target = T_world_current.inverse() @ T_world_target
6 changes: 3 additions & 3 deletions docs/source/examples/06_mesh.rst
@@ -16,7 +16,7 @@ Visualize a mesh. To get the demo data, see ``./assets/download_dragon_mesh.sh``
import time
from pathlib import Path
-import numpy as onp
+import numpy as np
import trimesh
import viser
@@ -35,13 +35,13 @@ Visualize a mesh. To get the demo data, see ``./assets/download_dragon_mesh.sh``
name="/simple",
vertices=vertices,
faces=faces,
-wxyz=tf.SO3.from_x_radians(onp.pi / 2).wxyz,
+wxyz=tf.SO3.from_x_radians(np.pi / 2).wxyz,
position=(0.0, 0.0, 0.0),
)
server.scene.add_mesh_trimesh(
name="/trimesh",
mesh=mesh.smoothed(),
-wxyz=tf.SO3.from_x_radians(onp.pi / 2).wxyz,
+wxyz=tf.SO3.from_x_radians(np.pi / 2).wxyz,
position=(0.0, 5.0, 0.0),
)
6 changes: 3 additions & 3 deletions docs/source/examples/07_record3d_visualizer.rst
@@ -16,7 +16,7 @@ Parse and stream record3d captures. To get the demo data, see ``./assets/downloa
import time
from pathlib import Path
-import numpy as onp
+import numpy as np
import tyro
from tqdm.auto import tqdm
@@ -96,7 +96,7 @@ Parse and stream record3d captures. To get the demo data, see ``./assets/downloa
# Load in frames.
server.scene.add_frame(
"/frames",
-wxyz=tf.SO3.exp(onp.array([onp.pi / 2.0, 0.0, 0.0])).wxyz,
+wxyz=tf.SO3.exp(np.array([np.pi / 2.0, 0.0, 0.0])).wxyz,
position=(0, 0, 0),
show_axes=False,
)
@@ -118,7 +118,7 @@ Parse and stream record3d captures. To get the demo data, see ``./assets/downloa
)
# Place the frustum.
-fov = 2 * onp.arctan2(frame.rgb.shape[0] / 2, frame.K[0, 0])
+fov = 2 * np.arctan2(frame.rgb.shape[0] / 2, frame.K[0, 0])
aspect = frame.rgb.shape[1] / frame.rgb.shape[0]
server.scene.add_camera_frustum(
f"/frames/t{i}/frustum",
9 changes: 4 additions & 5 deletions docs/source/examples/08_smpl_visualizer.rst
@@ -23,7 +23,6 @@ See here for download instructions:
from pathlib import Path
import numpy as np
-import numpy as onp
import tyro
import viser
@@ -43,7 +42,7 @@ See here for download instructions:
def __init__(self, model_path: Path) -> None:
assert model_path.suffix.lower() == ".npz", "Model should be an .npz file!"
-body_dict = dict(**onp.load(model_path, allow_pickle=True))
+body_dict = dict(**np.load(model_path, allow_pickle=True))
self._J_regressor = body_dict["J_regressor"]
self._weights = body_dict["weights"]
@@ -180,7 +179,7 @@ See here for download instructions:
@gui_random_shape.on_click
def _(_):
for beta in gui_betas:
-beta.value = onp.random.normal(loc=0.0, scale=1.0)
+beta.value = np.random.normal(loc=0.0, scale=1.0)
gui_betas = []
for i in range(num_betas):
@@ -205,8 +204,8 @@ See here for download instructions:
for joint in gui_joints:
# It's hard to uniformly sample orientations directly in so(3), so we
# first sample on S^3 and then convert.
-quat = onp.random.normal(loc=0.0, scale=1.0, size=(4,))
-quat /= onp.linalg.norm(quat)
+quat = np.random.normal(loc=0.0, scale=1.0, size=(4,))
+quat /= np.linalg.norm(quat)
joint.value = tf.SO3(wxyz=quat).log()
gui_joints: list[viser.GuiInputHandle[tuple[float, float, float]]] = []
10 changes: 5 additions & 5 deletions docs/source/examples/09_urdf_visualizer.rst
@@ -25,7 +25,7 @@ and viser. It can also take a path to a local URDF file as input.
import time
from typing import Literal
-import numpy as onp
+import numpy as np
import tyro
from robot_descriptions.loaders.yourdfpy import load_robot_description
@@ -44,8 +44,8 @@ and viser. It can also take a path to a local URDF file as input.
lower,
upper,
) in viser_urdf.get_actuated_joint_limits().items():
-lower = lower if lower is not None else -onp.pi
-upper = upper if upper is not None else onp.pi
+lower = lower if lower is not None else -np.pi
+upper = upper if upper is not None else np.pi
initial_pos = 0.0 if lower < 0 and upper > 0 else (lower + upper) / 2.0
slider = server.gui.add_slider(
label=joint_name,
@@ -56,7 +56,7 @@ and viser. It can also take a path to a local URDF file as input.
)
slider.on_update( # When sliders move, we update the URDF configuration.
lambda _: viser_urdf.update_cfg(
-onp.array([slider.value for slider in slider_handles])
+np.array([slider.value for slider in slider_handles])
)
)
slider_handles.append(slider)
@@ -97,7 +97,7 @@ and viser. It can also take a path to a local URDF file as input.
)
# Set initial robot configuration.
-viser_urdf.update_cfg(onp.array(initial_config))
+viser_urdf.update_cfg(np.array(initial_config))
# Create joint reset button.
reset_button = server.gui.add_button("Reset")
14 changes: 7 additions & 7 deletions docs/source/examples/11_colmap_visualizer.rst
@@ -19,7 +19,7 @@ Visualize COLMAP sparse reconstruction outputs. To get demo data, see ``./assets
from typing import List
import imageio.v3 as iio
-import numpy as onp
+import numpy as np
import tyro
from tqdm.auto import tqdm
@@ -60,7 +60,7 @@ Visualize COLMAP sparse reconstruction outputs. To get demo data, see ``./assets
def _(event: viser.GuiEvent) -> None:
client = event.client
assert client is not None
-client.camera.up_direction = tf.SO3(client.camera.wxyz) @ onp.array(
+client.camera.up_direction = tf.SO3(client.camera.wxyz) @ np.array(
[0.0, -1.0, 0.0]
)
@@ -82,10 +82,10 @@ Visualize COLMAP sparse reconstruction outputs. To get demo data, see ``./assets
"Point size", min=0.01, max=0.1, step=0.001, initial_value=0.05
)
-points = onp.array([points3d[p_id].xyz for p_id in points3d])
-colors = onp.array([points3d[p_id].rgb for p_id in points3d])
+points = np.array([points3d[p_id].xyz for p_id in points3d])
+colors = np.array([points3d[p_id].rgb for p_id in points3d])
-point_mask = onp.random.choice(points.shape[0], gui_points.value, replace=False)
+point_mask = np.random.choice(points.shape[0], gui_points.value, replace=False)
point_cloud = server.scene.add_point_cloud(
name="/colmap/pcd",
points=points[point_mask],
@@ -148,7 +148,7 @@ Visualize COLMAP sparse reconstruction outputs. To get demo data, see ``./assets
image = image[::downsample_factor, ::downsample_factor]
frustum = server.scene.add_camera_frustum(
f"/colmap/frame_{img_id}/frustum",
-fov=2 * onp.arctan2(H / 2, fy),
+fov=2 * np.arctan2(H / 2, fy),
aspect=W / H,
scale=0.15,
image=image,
@@ -159,7 +159,7 @@ Visualize COLMAP sparse reconstruction outputs. To get demo data, see ``./assets
@gui_points.on_update
def _(_) -> None:
-point_mask = onp.random.choice(points.shape[0], gui_points.value, replace=False)
+point_mask = np.random.choice(points.shape[0], gui_points.value, replace=False)
point_cloud.points = points[point_mask]
point_cloud.colors = colors[point_mask]
10 changes: 5 additions & 5 deletions docs/source/examples/15_gui_in_scene.rst
@@ -18,7 +18,7 @@ performed on them.
import time
from typing import Optional
-import numpy as onp
+import numpy as np
import viser
import viser.transforms as tf
@@ -35,14 +35,14 @@ performed on them.
When a frame is clicked, we display a 3D gui node.
"""
-rng = onp.random.default_rng(0)
+rng = np.random.default_rng(0)
displayed_3d_container: Optional[viser.Gui3dContainerHandle] = None
def make_frame(i: int) -> None:
# Sample a random orientation + position.
wxyz = rng.normal(size=4)
-wxyz /= onp.linalg.norm(wxyz)
+wxyz /= np.linalg.norm(wxyz)
position = rng.uniform(-3.0, 3.0, size=(3,))
# Create a coordinate frame and label.
@@ -72,7 +72,7 @@ performed on them.
)
T_world_target = tf.SE3.from_rotation_and_translation(
tf.SO3(frame.wxyz), frame.position
-) @ tf.SE3.from_translation(onp.array([0.0, 0.0, -0.5]))
+) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5]))
T_current_target = T_world_current.inverse() @ T_world_target
@@ -94,7 +94,7 @@ performed on them.
@randomize_orientation.on_click
def _(_) -> None:
wxyz = rng.normal(size=4)
-wxyz /= onp.linalg.norm(wxyz)
+wxyz /= np.linalg.norm(wxyz)
frame.wxyz = wxyz
@close.on_click
6 changes: 3 additions & 3 deletions docs/source/examples/17_background_composite.rst
@@ -16,7 +16,7 @@ be useful when we want a 2D image to occlude 3D geometry, such as for NeRF rende
import time
-import numpy as onp
+import numpy as np
import trimesh
import trimesh.creation
@@ -25,8 +25,8 @@ be useful when we want a 2D image to occlude 3D geometry, such as for NeRF rende
server = viser.ViserServer()
-img = onp.random.randint(0, 255, size=(1000, 1000, 3), dtype=onp.uint8)
-depth = onp.ones((1000, 1000, 1), dtype=onp.float32)
+img = np.random.randint(0, 255, size=(1000, 1000, 3), dtype=np.uint8)
+depth = np.ones((1000, 1000, 1), dtype=np.float32)
# Make a square middle portal.
depth[250:750, 250:750, :] = 10.0
10 changes: 5 additions & 5 deletions docs/source/examples/18_splines.rst
@@ -15,31 +15,31 @@ Make a ball with some random splines.
import time
-import numpy as onp
+import numpy as np
import viser
def main() -> None:
server = viser.ViserServer()
for i in range(10):
-positions = onp.random.normal(size=(30, 3)) * 3.0
+positions = np.random.normal(size=(30, 3)) * 3.0
server.scene.add_spline_catmull_rom(
f"/catmull_{i}",
positions,
tension=0.5,
line_width=3.0,
-color=onp.random.uniform(size=3),
+color=np.random.uniform(size=3),
segments=100,
)
-control_points = onp.random.normal(size=(30 * 2 - 2, 3)) * 3.0
+control_points = np.random.normal(size=(30 * 2 - 2, 3)) * 3.0
server.scene.add_spline_cubic_bezier(
f"/cubic_bezier_{i}",
positions,
control_points,
line_width=3.0,
-color=onp.random.uniform(size=3),
+color=np.random.uniform(size=3),
segments=100,
)
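The files shown above all make the same alias substitution. A bulk rename along these lines could approximate most of the change. This is a hypothetical sketch only, limited to the docs examples directory shown here and not necessarily how this commit was produced; the consolidated call sites (e.g. in 01_image.rst) and the removed duplicate import in 08_smpl_visualizer.rst would still need manual edits:

# Hypothetical helper: replace the whole-word alias onp with np in the example docs.
import pathlib
import re

for path in pathlib.Path("docs/source/examples").glob("*.rst"):
    text = path.read_text()
    path.write_text(re.sub(r"\bonp\b", "np", text))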