This repository has been archived by the owner on Nov 22, 2022. It is now read-only.

Revert D22801628. #1430

Open · wants to merge 1 commit into base: main

This revert removes the accelerate and inference_interface options from PyTextConfig and drops the corresponding accelerator and inference-interface handling from TorchScript export in pytext/task/new_task.py and pytext/workflow.py.
pytext/config/pytext_config.py (11 changes: 0 additions & 11 deletions)

@@ -111,17 +111,6 @@ class PyTextConfig(ConfigBase):
     export_torchscript_path: Optional[str] = None
     # Export quantized torchscript model
     torchscript_quantize: Optional[bool] = False
-    # Accelerator options.
-    # Options:
-    #   "half" - demote model to half precision
-    #   "nnpi" - freeze model for use with Glow on NNPI accelerator
-    accelerate: List[str] = []
-    # Inference Interface.
-    # Specifies which of the 3 optional list parameters a model takes,
-    # when the model implements the inference_ionterface() method.:
-    # Possible values: texts, multi_texts, tokens (and/or others as
-    # supported by inference_interface method).
-    inference_interface: Optional[str] = None
     # Base directory where modules are saved
     modules_save_dir: str = ""
     # Whether to save intermediate checkpoints for modules if they are best yet
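For context, the deleted comments document what these fields did: "half" demoted the model to half precision and "nnpi" froze the traced module for Glow on the NNPI accelerator, while inference_interface named which optional list parameter the model consumed. A minimal, illustrative Python sketch of a partial config using them (values are placeholders):

    # Illustrative only: a partial config dict with the two fields this PR
    # deletes; values and meanings are taken from the removed comments above.
    config_overrides = {
        "export_torchscript_path": "/tmp/model.pt1",  # placeholder path
        "torchscript_quantize": False,
        "accelerate": ["half", "nnpi"],   # fp16 demotion + Glow/NNPI freezing
        "inference_interface": "texts",   # or "multi_texts" / "tokens"
    }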
pytext/task/new_task.py (17 changes: 2 additions & 15 deletions)

@@ -279,14 +279,7 @@ def export(self, model, export_path, metric_channels=None, export_onnx_path=None
         )

     def torchscript_export(
-        self,
-        model,
-        export_path=None,
-        quantize=False,
-        sort_input=False,
-        sort_key=1,
-        inference_interface=None,
-        accelerate=[],  # noqa mutable default is read only
+        self, model, export_path=None, quantize=False, sort_input=False, sort_key=1
     ):
         # Make sure to put the model on CPU and disable CUDA before exporting to
         # ONNX to disable any data_parallel pieces
@@ -310,13 +303,7 @@
         model(*inputs)
         if quantize:
             model.quantize()
-        if "half" in accelerate:
-            model.half()
-        if inference_interface is not None:
-            model.inference_interface(inference_interface)
-        trace = jit.trace(model, inputs)
-        if "nnpi" in accelerate:
-            trace._c = torch._C._freeze_module(trace._c)
+        trace = model.trace(inputs)
         if hasattr(model, "torchscriptify"):
             trace = model.torchscriptify(self.data.tensorizers, trace)
         trace.apply(lambda s: s._pack() if s._c._has_method("_pack") else None)
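With the revert applied, callers of torchscript_export are back to the narrower signature. A hedged usage sketch (the snapshot and export paths are placeholders; importing load from pytext.workflow assumes it remains a module-level name there, as the diff below suggests):

    # Hedged sketch, not from this PR: export a loaded task's model with the
    # reverted signature. Paths are placeholders.
    from pytext.workflow import load  # assumption: load is visible on workflow

    task, train_config, _training_state = load("/tmp/model.pt")
    task.torchscript_export(
        task.model,
        export_path="/tmp/model.pt1",
        quantize=False,
        sort_input=False,
        sort_key=1,
    )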
pytext/workflow.py (10 changes: 2 additions & 8 deletions)

@@ -197,8 +197,6 @@ def save_and_export(
             model=task.model,
             export_path=config.export_torchscript_path,
             quantize=config.torchscript_quantize,
-            inference_interface=config.inference_interface,
-            accelerate=config.accelerate,
         )


@@ -219,14 +217,10 @@ def export_saved_model_to_caffe2(


 def export_saved_model_to_torchscript(
-    saved_model_path: str,
-    path: str,
-    quantize: bool = False,
-    inference_interface: Optional[str] = None,
-    accelerate: List[str] = [],  # noqa mutable default is read only
+    saved_model_path: str, path: str, quantize: bool = False
 ) -> None:
     task, train_config, _training_state = load(saved_model_path)
-    task.torchscript_export(task.model, path, quantize, inference_interface, accelerate)
+    task.torchscript_export(task.model, path, quantize)


 def test_model(
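Correspondingly, the public helper returns to three parameters. A hedged usage sketch (paths are placeholders):

    # Hedged sketch: the reverted three-parameter helper from pytext/workflow.py.
    from pytext.workflow import export_saved_model_to_torchscript

    export_saved_model_to_torchscript(
        saved_model_path="/tmp/model.pt",  # training snapshot to load
        path="/tmp/model.pt1",             # TorchScript output path
        quantize=False,
    )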