Removing error messages
dibyaghosh authored and kvablack committed Dec 22, 2023
1 parent 0eb0e65 commit f0952fd
Showing 1 changed file with 4 additions and 63 deletions.
67 changes: 4 additions & 63 deletions examples/01_inference_pretrained.ipynb
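For reference, the cleanup this commit applies by hand — blanking the saved "outputs" of the affected notebook cells — can also be scripted. A minimal sketch, assuming the standard .ipynb JSON layout; the helper name and invocation are illustrative and not part of the commit:

import json

def clear_outputs(path):
    """Blank the saved outputs of every code cell, mirroring what this diff does by hand."""
    with open(path) as f:
        nb = json.load(f)
    for cell in nb.get("cells", []):
        if cell.get("cell_type") == "code":
            cell["outputs"] = []          # corresponds to the `"outputs": [],` lines in the diff below
            cell["execution_count"] = None
    with open(path, "w") as f:
        json.dump(nb, f, indent=1)
        f.write("\n")

clear_outputs("examples/01_inference_pretrained.ipynb")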
@@ -17,27 +17,7 @@
"execution_count": null,
"id": "83d34283",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2023-12-13 00:16:30.675856: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
"2023-12-13 00:16:30.675886: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
"2023-12-13 00:16:30.676764: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
"2023-12-13 00:16:31.089480: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
"/home/oier/miniconda3/envs/octo/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
" from .autonotebook import tqdm as notebook_tqdm\n",
"Fetching 7 files: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 7/7 [00:00<00:00, 180123.48it/s]\n",
"/home/oier/miniconda3/envs/octo/lib/python3.10/site-packages/transformers/models/t5/tokenization_t5_fast.py:158: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-base automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n"
]
}
],
"outputs": [],
"source": [
"from octo.model.octo_model import OctoModel\n",
"\n",
@@ -88,15 +68,6 @@
"id": "e669650f",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:root:'observations' is missing items compared to example_batch: {'pad_mask_dict/image_wrist', 'pad_mask_dict/timestep', 'image_wrist', 'pad_mask_dict/image_primary', 'proprio', 'pad_mask_dict/proprio', 'timestep'}\n",
"WARNING:root:No pad_mask_dict found. Nothing will be masked.\n",
"WARNING:root:Skipping observation tokenizer: obs_wrist\n"
]
},
{
"name": "stdout",
"output_type": "stream",
@@ -172,21 +143,7 @@
"execution_count": null,
"id": "42c04953-869d-48a8-a2df-e601324e97e6",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Fetching 7 files: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 7/7 [00:00<00:00, 149796.57it/s]\n",
"/home/oier/miniconda3/envs/octo/lib/python3.10/site-packages/transformers/models/t5/tokenization_t5_fast.py:158: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-base automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n"
]
}
],
"outputs": [],
"source": [
"from octo.model.octo_model import OctoModel\n",
"\n",
@@ -267,15 +224,7 @@
"execution_count": null,
"id": "9ad64434",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:root:'tasks' is missing items compared to example_batch: {'pad_mask_dict/image_wrist', 'pad_mask_dict/timestep', 'image_wrist', 'proprio', 'pad_mask_dict/proprio', 'timestep'}\n"
]
}
],
"outputs": [],
"source": [
"WINDOW_SIZE = 2\n",
"\n",
@@ -292,15 +241,7 @@
"execution_count": null,
"id": "74d6b20f",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 37/37 [00:00<00:00, 224.02it/s]\n"
]
}
],
"outputs": [],
"source": [
"# run inference loop, this model only uses single image observations for bridge\n",
"# collect predicted and true actions\n",
