Embedding Model Construction
Install the required packages:

```bash
pip install torch transformers
```

Load the tokenizer and the base causal LM:

```python
from transformers import LlamaForCausalLM, LlamaTokenizer

# Substitute a LLaMA checkpoint you have access to (or a local path);
# gated models require accepting the license on the Hugging Face Hub.
model_name = "facebook/llama-7b"
tokenizer = LlamaTokenizer.from_pretrained(model_name)
model = LlamaForCausalLM.from_pretrained(model_name)
```
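Before wrapping the model, it helps to see what the tokenizer produces for a batch. A minimal sketch (the example sentences are illustrative, and reusing the EOS token for padding is a common convention, not something the original specifies):

```python
# LLaMA tokenizers ship without a pad token; reuse EOS for padding.
tokenizer.pad_token = tokenizer.eos_token

batch = tokenizer(
    ["A first sentence.", "A slightly longer second sentence."],
    padding=True,
    return_tensors="pt",
)
print(batch["input_ids"].shape)   # (2, max_sequence_length)
print(batch["attention_mask"])    # 1 = real token, 0 = padding
```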
To use the decoder as an embedding model, attention must be bidirectional rather than causal. The subclass below overrides `forward` to pass a full-visibility mask:

```python
import torch
from transformers import LlamaForCausalLM

class LlamaBidirectionalAttention(LlamaForCausalLM):
    """LLaMA variant in which every token attends to every other token."""

    def forward(self, input_ids, attention_mask=None, **kwargs):
        # A custom 4D additive mask of zeros is used as-is by recent
        # transformers versions, bypassing the causal mask entirely
        # (a 2D mask of ones would only disable padding masking, not
        # the causal restriction applied inside the decoder).
        bsz, seq_len = input_ids.shape
        attention_mask = torch.zeros(
            bsz, 1, seq_len, seq_len, dtype=self.dtype, device=input_ids.device
        )
        return super().forward(input_ids, attention_mask=attention_mask, **kwargs)
```
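A minimal sketch of how the wrapper could be used to produce sentence embeddings; the mean-pooling step and the example sentence are illustrative choices, not a method prescribed by the original:

```python
import torch

bidir_model = LlamaBidirectionalAttention.from_pretrained(model_name)
bidir_model.eval()

inputs = tokenizer("Embeddings from a decoder-only LLM.", return_tensors="pt")
with torch.no_grad():
    outputs = bidir_model(inputs["input_ids"], output_hidden_states=True)

# Mean-pool the final hidden layer over the sequence dimension.
embedding = outputs.hidden_states[-1].mean(dim=1)  # shape: (1, hidden_size)
```

Mean pooling over the last hidden layer is one common readout; last-token pooling is another reasonable choice.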