From 76a5c185b11d5ba4a1a17baab5259da348dd5dc8 Mon Sep 17 00:00:00 2001
From: root
Date: Thu, 17 Aug 2023 20:25:52 +0000
Subject: [PATCH] clean up circular import

---
 llmfoundry/data/finetuning/dataloader.py      |  2 +-
 llmfoundry/models/layers/mosaicbert_layers.py | 20 +++++++++----------
 llmfoundry/models/mpt/configuration_mpt.py    |  1 +
 3 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/llmfoundry/data/finetuning/dataloader.py b/llmfoundry/data/finetuning/dataloader.py
index 96816f1278..c0c259788d 100644
--- a/llmfoundry/data/finetuning/dataloader.py
+++ b/llmfoundry/data/finetuning/dataloader.py
@@ -236,7 +236,7 @@ def _validate_config(dataset_cfg: DictConfig):
         )
 
 
-def _build_hf_dataset_from_remote(cfg: DictConfig, tokenizer: Tokenizer):
+def _build_hf_dataset_from_remote(cfg: DictConfig, tokenizer):
     """Builds a dataset from a remote object store.
 
     This function supports 'jsonl', 'csv', and 'parquet' file formats for the dataset. It will attempt to download
diff --git a/llmfoundry/models/layers/mosaicbert_layers.py b/llmfoundry/models/layers/mosaicbert_layers.py
index 524c068963..bd7477e1ce 100644
--- a/llmfoundry/models/layers/mosaicbert_layers.py
+++ b/llmfoundry/models/layers/mosaicbert_layers.py
@@ -76,7 +76,7 @@ class BertEmbeddings(nn.Module):
     This module ignores the `position_ids` input to the `forward` method.
     """
 
-    def __init__(self, config: BertConfig):
+    def __init__(self, config):
         super().__init__()
         self.word_embeddings = nn.Embedding(config.vocab_size,
                                             config.hidden_size,
@@ -156,7 +156,7 @@ class BertUnpadSelfAttention(nn.Module):
     See `forward` method for additional detail.
     """
 
-    def __init__(self, config: BertConfig):
+    def __init__(self, config):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(
                 config, 'embedding_size'):
@@ -253,7 +253,7 @@ class BertSelfOutput(nn.Module):
     BERT modules.
     """
 
-    def __init__(self, config: BertConfig):
+    def __init__(self, config):
         super().__init__()
         self.dense = nn.Linear(config.hidden_size, config.hidden_size)
         self.LayerNorm = nn.LayerNorm(config.hidden_size,
@@ -271,7 +271,7 @@ def forward(self, hidden_states: torch.Tensor,
 class BertUnpadAttention(nn.Module):
     """Chains attention, Dropout, and LayerNorm for MosaicBERT."""
 
-    def __init__(self, config: BertConfig):
+    def __init__(self, config):
         super().__init__()
         self.self = BertUnpadSelfAttention(config)
         self.output = BertSelfOutput(config)
@@ -322,7 +322,7 @@ class BertGatedLinearUnitMLP(nn.Module):
     parameter size, MosaicBERT typically offers a net higher throughput than a
     Hugging Face BERT built from the same `config`.
     """
-    def __init__(self, config: BertConfig):
+    def __init__(self, config):
         super().__init__()
         self.config = config
         self.gated_layers = nn.Linear(config.hidden_size,
@@ -358,7 +358,7 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
 class BertLayer(nn.Module):
     """Composes the MosaicBERT attention and FFN blocks into a single layer."""
 
-    def __init__(self, config: BertConfig):
+    def __init__(self, config):
         super(BertLayer, self).__init__()
         self.attention = BertUnpadAttention(config)
         self.mlp = BertGatedLinearUnitMLP(config)
@@ -401,7 +401,7 @@ class BertEncoder(nn.Module):
     at padded tokens, and pre-computes attention biases to implement ALiBi.
     """
 
-    def __init__(self, config: BertConfig):
+    def __init__(self, config):
         super().__init__()
         layer = BertLayer(config)
         self.layer = nn.ModuleList(
@@ -548,7 +548,7 @@ def forward(
 
 class BertPooler(nn.Module):
 
-    def __init__(self, config: BertConfig):
+    def __init__(self, config):
         super(BertPooler, self).__init__()
         self.dense = nn.Linear(config.hidden_size, config.hidden_size)
         self.activation = nn.Tanh()
@@ -566,7 +566,7 @@ def forward(self,
 
 class BertPredictionHeadTransform(nn.Module):
 
-    def __init__(self, config: BertConfig):
+    def __init__(self, config):
         super().__init__()
         self.dense = nn.Linear(config.hidden_size, config.hidden_size)
         if isinstance(config.hidden_act, str):
@@ -587,7 +587,7 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
 ###################
 class BertLMPredictionHead(nn.Module):
 
-    def __init__(self, config: BertConfig, bert_model_embedding_weights):
+    def __init__(self, config, bert_model_embedding_weights):
         super().__init__()
         self.transform = BertPredictionHeadTransform(config)
         # The output weights are the same as the input embeddings, but there is
diff --git a/llmfoundry/models/mpt/configuration_mpt.py b/llmfoundry/models/mpt/configuration_mpt.py
index 08c02fa3b1..1bd79c10d2 100644
--- a/llmfoundry/models/mpt/configuration_mpt.py
+++ b/llmfoundry/models/mpt/configuration_mpt.py
@@ -1,3 +1,4 @@
+
 # Copyright 2022 MosaicML LLM Foundry authors
 # SPDX-License-Identifier: Apache-2.0