You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Traceback (most recent call last):
File "/mnt/rangehow/ICLP/main.py", line 24, in <module>
vectors=get_embeddings(1,docs,args)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/mnt/rangehow/ICLP/get_embeddings.py", line 22, in get_embeddings
doc_embeddings = model.encode(docs,convert_to_tensor=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/mnt/rangehow/miniconda3/lib/python3.12/site-packages/sentence_transformers/SentenceTransformer.py", line 601, in encode
out_features = self.forward(features, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/mnt/rangehow/miniconda3/lib/python3.12/site-packages/sentence_transformers/SentenceTransformer.py", line 668, in forward
input = module(input, **module_kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/mnt/rangehow/miniconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1554, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/mnt/rangehow/miniconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1563, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/mnt/rangehow/miniconda3/lib/python3.12/site-packages/sentence_transformers/models/Transformer.py", line 118, in forward
output_states = self.auto_model(**trans_features, **kwargs, return_dict=False)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/mnt/rangehow/miniconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1554, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/mnt/rangehow/miniconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1563, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/mnt/rangehow/.cache/huggingface/modules/transformers_modules/dunzhang/stella_en_400M_v5/1bb50bc7bb726810eac2140e62155b88b0df198f/modeling.py", line 919, in forward
encoder_outputs = self.encoder(
^^^^^^^^^^^^^
File "/mnt/rangehow/miniconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1554, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/mnt/rangehow/miniconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1563, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/mnt/rangehow/.cache/huggingface/modules/transformers_modules/dunzhang/stella_en_400M_v5/1bb50bc7bb726810eac2140e62155b88b0df198f/modeling.py", line 736, in forward
layer_outputs = layer_module(
^^^^^^^^^^^^^
File "/mnt/rangehow/miniconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1554, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/mnt/rangehow/miniconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1563, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/mnt/rangehow/.cache/huggingface/modules/transformers_modules/dunzhang/stella_en_400M_v5/1bb50bc7bb726810eac2140e62155b88b0df198f/modeling.py", line 658, in forward
attention_outputs = self.attention(
^^^^^^^^^^^^^^^
File "/mnt/rangehow/miniconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1554, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/mnt/rangehow/miniconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1563, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/mnt/rangehow/.cache/huggingface/modules/transformers_modules/dunzhang/stella_en_400M_v5/1bb50bc7bb726810eac2140e62155b88b0df198f/modeling.py", line 499, in forward
context_layer = self.memory_efficient_attention(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/mnt/rangehow/miniconda3/lib/python3.12/site-packages/xformers/ops/fmha/__init__.py", line 276, in memory_efficient_attention
return _memory_efficient_attention(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/mnt/rangehow/miniconda3/lib/python3.12/site-packages/xformers/ops/fmha/__init__.py", line 395, in _memory_efficient_attention
return _memory_efficient_attention_forward(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/mnt/rangehow/miniconda3/lib/python3.12/site-packages/xformers/ops/fmha/__init__.py", line 411, in _memory_efficient_attention_forward
inp.validate_inputs()
File "/mnt/rangehow/miniconda3/lib/python3.12/site-packages/xformers/ops/fmha/common.py", line 145, in validate_inputs
raise ValueError(
ValueError: Attention bias and Query/Key/Value should be on the same device
query.device: cuda:1
attn_bias : cuda:0
The text was updated successfully, but these errors were encountered:
Traceback
The text was updated successfully, but these errors were encountered: