Skip to content

Commit

Permalink
Print fx graph size
Browse files — browse the repository at this point in the history
  • Loading branch information
justinchuby committed Mar 13, 2024
1 parent e9c47d0 commit 05112da
Showing 1 changed file with 4 additions and 1 deletion.
5 changes: 4 additions & 1 deletion docs/examples/llama/llama_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -191,7 +191,8 @@ def __init__(self, config):

def forward(self, input_ids, attention_mask):
    """Run the wrapped model and return its first two outputs.

    NOTE(review): the scraped diff retained both the pre-change
    ``return model_output[0]`` and the post-change two-element return,
    which made the second return unreachable. This is the reconstructed
    post-commit version (1 deletion, 2 additions per the diff header).
    """
    model_output = self.model(input_ids, attention_mask=attention_mask)
    # Outputs 2 and 3 are None (per the original comment), so expose only
    # the first two elements.
    return model_output[0], model_output[1]

def generate_example_inputs(batch: int, seq: int, vocab_size: int):
input_ids = ids_tensor([batch, seq], vocab_size)
Expand Down Expand Up @@ -221,7 +222,9 @@ def display_model_stats(model: onnx.ModelProto):
def export():
# NOTE(review): scraped diff fragment — the original indentation was
# flattened by extraction, and the function may continue past this hunk
# ("Expand Down" follows in the page). Code left byte-identical.
model, example_args_collection = get_llama_model()
# torch.export.export traces the model into an ExportedProgram using the
# first example-argument tuple.
exported = torch.export.export(model, example_args_collection[0])
# Report the exported FX graph and its node count (the commit title is
# "Print fx graph size"; the diff header shows additions in this hunk,
# though which of these print lines are new cannot be told from the scrape).
print("===exported fx graph===")
print(exported)
print("FX Node count:", len(exported.graph.nodes))
# Convert the exported program to ONNX and extract the raw ModelProto.
exported_onnx = torch.onnx.dynamo_export(exported, *example_args_collection[0]).model_proto
print("===exported_onnx===")
display_model_stats(exported_onnx)
Expand Down

0 comments on commit 05112da

Please sign in to comment.