Volume rendering compositing changes and wireframe rendering to vis chunks in volume rendering mode (#503)

* feat: VR front-to-back compositing and chunk vis

* chore: add comment about opacity correction source

* refactor: clearer naming for compositing

* chore: fix formatting

* feat: add gain parameter to VR and python binds
The gain float can be used to scale the VR intensity.
Also bind that gain to Python controls and bind VR bool to Python.

* refactor: pull OIT emit function out as standalone

* feat: perform OIT along rays during VR

* test: ensure that color and revealage are independent of sampling rate
Refactored the volume rendering shader to accommodate this.

* feat: add depth culling to VR rays during marching

* chore: format file

* fix: break composite loop if VR ray goes behind opaque

* fix: remove console.log during testing

* feat: volume rendering chunk vis in wireframe mode

* refactor: rename gain to volumeRenderingGain
Also update relevant Python bindings

* feat: change gain scale from linear to exponential

* refactor: rename to depthBufferTexture (remove ID)

* format: run python formatting

* fix: default the volume rendering slider gain to 0 (e^0 passed to shader)
seankmartin authored Feb 6, 2024
1 parent 9524a72 commit a9939d6
Showing 7 changed files with 221 additions and 20 deletions.
2 changes: 2 additions & 0 deletions python/neuroglancer/__init__.py
@@ -48,6 +48,8 @@
PlaceEllipsoidTool, # noqa: F401
BlendTool, # noqa: F401
OpacityTool, # noqa: F401
VolumeRenderingTool, # noqa: F401
VolumeRenderingGainTool, # noqa: F401
VolumeRenderingDepthSamplesTool, # noqa: F401
CrossSectionRenderScaleTool, # noqa: F401
SelectedAlphaTool, # noqa: F401
18 changes: 18 additions & 0 deletions python/neuroglancer/viewer_state.py
@@ -164,6 +164,18 @@ class OpacityTool(Tool):
TOOL_TYPE = "opacity"


@export_tool
class VolumeRenderingTool(Tool):
__slots__ = ()
TOOL_TYPE = "volumeRendering"


@export_tool
class VolumeRenderingGainTool(Tool):
__slots__ = ()
TOOL_TYPE = "volumeRenderingGain"


@export_tool
class VolumeRenderingDepthSamplesTool(Tool):
__slots__ = ()
@@ -543,6 +555,12 @@ def __init__(self, *args, **kwargs):
)
opacity = wrapped_property("opacity", optional(float, 0.5))
blend = wrapped_property("blend", optional(str))
volume_rendering = volumeRendering = wrapped_property(
"volumeRendering", optional(bool, False)
)
volume_rendering_gain = volumeRenderingGain = wrapped_property(
"volumeRenderingGain", optional(float, 1)
)
volume_rendering_depth_samples = volumeRenderingDepthSamples = wrapped_property(
"volumeRenderingDepthSamples", optional(float, 64)
)
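For orientation, here is a minimal sketch (not part of this commit) of driving the new Python-side volume rendering state defined above; the data source URL and layer name are placeholders, and the property names follow the wrapped_property definitions in this diff:

```python
import neuroglancer

viewer = neuroglancer.Viewer()
with viewer.txn() as s:
    # Hypothetical image layer; substitute a real data source.
    s.layers["image"] = neuroglancer.ImageLayer(
        source="precomputed://https://example.com/my-volume",
    )
    layer = s.layers["image"]
    layer.volume_rendering = True               # "volumeRendering" bool, default False
    layer.volume_rendering_gain = 2.0           # "volumeRenderingGain", default 1
    layer.volume_rendering_depth_samples = 128  # "volumeRenderingDepthSamples", default 64
print(viewer)  # prints the viewer URL reflecting this state
```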
18 changes: 18 additions & 0 deletions src/image_user_layer.ts
@@ -98,6 +98,7 @@ import {
ShaderControls,
} from "#/widget/shader_controls";
import { Tab } from "#/widget/tab_view";
import { trackableFiniteFloat } from "#/trackable_finite_float";

const OPACITY_JSON_KEY = "opacity";
const BLEND_JSON_KEY = "blend";
@@ -106,6 +107,7 @@ const SHADER_CONTROLS_JSON_KEY = "shaderControls";
const CROSS_SECTION_RENDER_SCALE_JSON_KEY = "crossSectionRenderScale";
const CHANNEL_DIMENSIONS_JSON_KEY = "channelDimensions";
const VOLUME_RENDERING_JSON_KEY = "volumeRendering";
const VOLUME_RENDERING_GAIN_JSON_KEY = "volumeRenderingGain";
const VOLUME_RENDERING_DEPTH_SAMPLES_JSON_KEY = "volumeRenderingDepthSamples";

export interface ImageLayerSelectionState extends UserLayerSelectionState {
@@ -125,6 +127,7 @@ export class ImageUserLayer extends Base {
dataType = new WatchableValue<DataType | undefined>(undefined);
sliceViewRenderScaleHistogram = new RenderScaleHistogram();
sliceViewRenderScaleTarget = trackableRenderScaleTarget(1);
volumeRenderingGain = trackableFiniteFloat(0);
volumeRenderingChunkResolutionHistogram = new RenderScaleHistogram(
volumeRenderingDepthSamplesOriginLogScale,
);
@@ -199,6 +202,7 @@
isLocalDimension;
this.blendMode.changed.add(this.specificationChanged.dispatch);
this.opacity.changed.add(this.specificationChanged.dispatch);
this.volumeRenderingGain.changed.add(this.specificationChanged.dispatch);
this.fragmentMain.changed.add(this.specificationChanged.dispatch);
this.shaderControlState.changed.add(this.specificationChanged.dispatch);
this.sliceViewRenderScaleTarget.changed.add(
@@ -252,6 +256,7 @@
);
const volumeRenderLayer = context.registerDisposer(
new VolumeRenderingRenderLayer({
gain: this.volumeRenderingGain,
multiscaleSource: volume,
shaderControlState: this.shaderControlState,
shaderError: this.shaderError,
@@ -299,6 +304,9 @@
specification[CHANNEL_DIMENSIONS_JSON_KEY],
);
this.volumeRendering.restoreState(specification[VOLUME_RENDERING_JSON_KEY]);
this.volumeRenderingGain.restoreState(
specification[VOLUME_RENDERING_GAIN_JSON_KEY],
);
this.volumeRenderingDepthSamplesTarget.restoreState(
specification[VOLUME_RENDERING_DEPTH_SAMPLES_JSON_KEY],
);
@@ -313,6 +321,7 @@
this.sliceViewRenderScaleTarget.toJSON();
x[CHANNEL_DIMENSIONS_JSON_KEY] = this.channelCoordinateSpace.toJSON();
x[VOLUME_RENDERING_JSON_KEY] = this.volumeRendering.toJSON();
x[VOLUME_RENDERING_GAIN_JSON_KEY] = this.volumeRenderingGain.toJSON();
x[VOLUME_RENDERING_DEPTH_SAMPLES_JSON_KEY] =
this.volumeRenderingDepthSamplesTarget.toJSON();
return x;
@@ -451,6 +460,15 @@ const LAYER_CONTROLS: LayerControlDefinition<ImageUserLayer>[] = [
toolJson: VOLUME_RENDERING_JSON_KEY,
...checkboxLayerControl((layer) => layer.volumeRendering),
},
{
label: "Gain (3D)",
toolJson: VOLUME_RENDERING_GAIN_JSON_KEY,
isValid: (layer) => layer.volumeRendering,
...rangeLayerControl((layer) => ({
value: layer.volumeRenderingGain,
options: { min: -10.0, max: 10.0, step: 0.1 },
})),
},
{
label: "Resolution (3D)",
toolJson: VOLUME_RENDERING_DEPTH_SAMPLES_JSON_KEY,
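A brief note on the "Gain (3D)" control defined above: per the commit messages, the gain scale is exponential rather than linear, so a slider value $g \in [-10, 10]$ corresponds to an intensity multiplier of roughly $e^{g}$ in the shader, and the default $g = 0$ leaves the intensity unchanged ($e^{0} = 1$).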
16 changes: 11 additions & 5 deletions src/perspective_view/panel.ts
@@ -109,22 +109,26 @@ void emit(vec4 color, highp uint pickId) {
* http://casual-effects.blogspot.com/2015/03/implemented-weighted-blended-order.html
*/
export const glsl_computeOITWeight = `
float computeOITWeight(float alpha) {
float computeOITWeight(float alpha, float depth) {
float a = min(1.0, alpha) * 8.0 + 0.01;
float b = -gl_FragCoord.z * 0.95 + 1.0;
float b = -depth * 0.95 + 1.0;
return a * a * a * b * b * b;
}
`;

// Color must be premultiplied by alpha.
// Can use emitAccumAndRevealage() to emit a pre-weighted OIT result.
export const glsl_perspectivePanelEmitOIT = [
glsl_computeOITWeight,
`
void emitAccumAndRevealage(vec4 accum, float revealage, highp uint pickId) {
v4f_fragData0 = vec4(accum.rgb, revealage);
v4f_fragData1 = vec4(accum.a, 0.0, 0.0, 0.0);
}
void emit(vec4 color, highp uint pickId) {
float weight = computeOITWeight(color.a);
float weight = computeOITWeight(color.a, gl_FragCoord.z);
vec4 accum = color * weight;
v4f_fragData0 = vec4(accum.rgb, color.a);
v4f_fragData1 = vec4(accum.a, 0.0, 0.0, 0.0);
emitAccumAndRevealage(accum, color.a, pickId);
}
`,
];
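For reference, and following the weighted blended OIT technique cited in the comment above (the resolve pass is not part of this diff, so this is the standard form rather than Neuroglancer's exact code), the outputs written by emitAccumAndRevealage are typically resolved as

$$
C \approx \frac{\sum_i w_i \alpha_i c_i}{\max\left(\sum_i w_i \alpha_i,\ \epsilon\right)} \left(1 - \prod_i (1 - \alpha_i)\right) + C_{\mathrm{bg}} \prod_i (1 - \alpha_i),
$$

where $w_i = \mathrm{computeOITWeight}(\alpha_i, z_i)$. Here accum.rgb carries $w_i \alpha_i c_i$ (color is premultiplied by alpha), the second render target carries $w_i \alpha_i$, and the revealage channel accumulates toward $\prod_i (1 - \alpha_i)$ through the blend state.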
@@ -846,6 +850,8 @@ export class PerspectivePanel extends RenderedDataPanel {
renderContext.emitPickID = false;
for (const [renderLayer, attachment] of visibleLayers) {
if (renderLayer.isTransparent) {
renderContext.depthBufferTexture =
this.offscreenFramebuffer.colorBuffers[OffscreenTextures.Z].texture;
renderLayer.draw(renderContext, attachment);
}
}
5 changes: 5 additions & 0 deletions src/perspective_view/render_layer.ts
@@ -50,6 +50,11 @@ export interface PerspectiveViewRenderContext
* Specifies whether there was a previous pick ID pass.
*/
alreadyEmittedPickID: boolean;

/**
* Specifies the depth frame buffer texture to query during rendering.
*/
depthBufferTexture?: WebGLTexture | null;
}

export class PerspectiveViewRenderLayer<
73 changes: 73 additions & 0 deletions src/volume_rendering/volume_render_layer.spec.ts
@@ -0,0 +1,73 @@
import { fragmentShaderTest } from "#/webgl/shader_testing";
import { glsl_computeOITWeight } from "#/perspective_view/panel";
import { glsl_emitRGBAVolumeRendering } from "#/volume_rendering/volume_render_layer";
import { vec3 } from "gl-matrix";

describe("volume rendering compositing", () => {
const steps = [16, 22, 32, 37, 64, 100, 128, 256, 512, 551, 1024, 2048];
const revealages = new Float32Array(steps.length);
it("combines uniform colors the same regardless of sampling rate", () => {
fragmentShaderTest(
{
inputSteps: "float",
},
{
outputValue1: "float",
outputValue2: "float",
outputValue3: "float",
outputValue4: "float",
revealage: "float",
},
(tester) => {
const { builder } = tester;
builder.addFragmentCode(glsl_computeOITWeight);
builder.addFragmentCode(`
vec4 color = vec4(0.1, 0.3, 0.5, 0.1);
float idealSamplingRate = 512.0;
float uGain = 0.01;
float uBrightnessFactor;
vec4 outputColor;
float depthAtRayPosition;
`);
builder.addFragmentCode(glsl_emitRGBAVolumeRendering);
builder.setFragmentMain(`
outputColor = vec4(0.0);
revealage = 1.0;
uBrightnessFactor = idealSamplingRate / inputSteps;
for (int i = 0; i < int(inputSteps); ++i) {
depthAtRayPosition = mix(0.0, 1.0, float(i) / (inputSteps - 1.0));
emitRGBA(color);
}
outputValue1 = outputColor.r;
outputValue2 = outputColor.g;
outputValue3 = outputColor.b;
outputValue4 = outputColor.a;
`);
for (let i = 0; i < steps.length; ++i) {
const inputSteps = steps[i];
tester.execute({ inputSteps });
const values = tester.values;
const {
revealage,
outputValue1,
outputValue2,
outputValue3,
outputValue4,
} = values;
const color = vec3.fromValues(
outputValue1 / outputValue4,
outputValue2 / outputValue4,
outputValue3 / outputValue4,
);
expect(color[0]).toBeCloseTo(0.1, 5);
expect(color[1]).toBeCloseTo(0.3, 5);
expect(color[2]).toBeCloseTo(0.5, 5);
revealages[i] = revealage;
}
for (let i = 1; i < revealages.length; ++i) {
expect(revealages[i]).toBeCloseTo(revealages[i - 1], 2);
}
},
);
});
});
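For context on why the test above can expect sampling-rate independence (the shader is not shown in this diff, so take this as the standard opacity-correction relation rather than the exact implementation): with a reference step count $N_{\mathrm{ref}}$ (idealSamplingRate) and an actual step count $N$ (inputSteps), per-sample opacity is typically corrected as

$$
\alpha_N = 1 - (1 - \alpha_{\mathrm{ref}})^{N_{\mathrm{ref}} / N},
$$

so the accumulated transparency $\prod (1 - \alpha_N) = (1 - \alpha_{\mathrm{ref}})^{N_{\mathrm{ref}}}$ no longer depends on $N$. The test's uBrightnessFactor = idealSamplingRate / inputSteps supplies exactly that step-count ratio to the shader.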