Hello! I ran into a problem after fine-tuning on my own dataset, and I really don't know how to solve it.
Traceback (most recent call last):
File "lora_infer.py", line 204, in
generation_output = model.generate(
File "/root/autodl-tmp/prollama/ProLLaMA-main/peft/peft_model.py", line 581, in generate
outputs = self.base_model.generate(**kwargs)
File "/root/miniconda3/envs/q/lib/python3.8/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/root/miniconda3/envs/q/lib/python3.8/site-packages/transformers/generation/utils.py", line 1719, in generate
return self.sample(
File "/root/miniconda3/envs/q/lib/python3.8/site-packages/transformers/generation/utils.py", line 2801, in sample
outputs = self(
File "/root/miniconda3/envs/q/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/root/miniconda3/envs/q/lib/python3.8/site-packages/accelerate/hooks.py", line 166, in new_forward
output = module._old_forward(*args, **kwargs)
File "/root/miniconda3/envs/q/lib/python3.8/site-packages/transformers/models/llama/modeling_llama.py", line 1034, in forward
outputs = self.model(
File "/root/miniconda3/envs/q/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/root/miniconda3/envs/q/lib/python3.8/site-packages/transformers/models/llama/modeling_llama.py", line 922, in forward
layer_outputs = decoder_layer(
File "/root/miniconda3/envs/q/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/root/miniconda3/envs/q/lib/python3.8/site-packages/transformers/models/llama/modeling_llama.py", line 672, in forward
hidden_states, self_attn_weights, present_key_value = self.self_attn(
File "/root/miniconda3/envs/q/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/root/miniconda3/envs/q/lib/python3.8/site-packages/transformers/models/llama/modeling_llama.py", line 366, in forward
query_states = self.q_proj(hidden_states)
File "/root/miniconda3/envs/q/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/root/autodl-tmp/prollama/ProLLaMA-main/peft/tuners/lora.py", line 375, in forward
result += self.lora_B(self.lora_A(self.lora_dropout(x))) * self.scaling
File "/root/miniconda3/envs/q/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/root/miniconda3/envs/q/lib/python3.8/site-packages/torch/nn/modules/linear.py", line 114, in forward
return F.linear(input, self.weight, self.bias)
RuntimeError: expected scalar type BFloat16 but found Float
Can someone provide guidance on how to proceed? Thanks in advance!
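For what it's worth, this error usually means the LoRA adapter weights (`lora_A`/`lora_B`) are sitting in float32 while the base model runs in bfloat16, so `F.linear` sees two different dtypes inside the adapter's forward pass. Below is a minimal sketch of one common workaround, not the project's official fix: load the base model in bfloat16 and then cast the wrapped PEFT model to the same dtype. The checkpoint paths are placeholders; substitute your own.

```python
import torch
from transformers import LlamaForCausalLM
from peft import PeftModel

# Placeholder paths -- replace with your actual base model and LoRA checkpoint.
BASE_MODEL = "/path/to/base_model"
LORA_WEIGHTS = "/path/to/lora_checkpoint"

# Load the base model in bfloat16, the dtype the failing linear layers expect.
model = LlamaForCausalLM.from_pretrained(
    BASE_MODEL,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Attach the LoRA adapter.
model = PeftModel.from_pretrained(model, LORA_WEIGHTS)

# The adapter's lora_A/lora_B matrices are often loaded in float32; casting
# the whole PEFT model makes every parameter bfloat16, so F.linear sees one
# consistent dtype and the "expected scalar type BFloat16 but found Float"
# error goes away.
model = model.to(torch.bfloat16)
model.eval()
```

Alternatively, loading the base model with `torch_dtype=torch.float32` (matching whatever dtype the adapter was saved in) sidesteps the mismatch at the cost of extra memory.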