Fix shape issue (#1905)
* fix torch guide

* trim .md output
sachinprasadhs authored Aug 3, 2024
1 parent 2ef3f2d commit 687a1d2
Showing 3 changed files with 38 additions and 38 deletions.
guides/custom_train_step_in_torch.py (4 changes: 2 additions & 2 deletions)
@@ -2,7 +2,7 @@
Title: Customizing what happens in `fit()` with PyTorch
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2023/06/27
-Last modified: 2023/06/27
+Last modified: 2024/08/01
Description: Overriding the training step of the Model class with PyTorch.
Accelerator: GPU
"""
@@ -397,7 +397,7 @@ def compile(self, d_optimizer, g_optimizer, loss_fn):

def train_step(self, real_images):
device = "cuda" if torch.cuda.is_available() else "cpu"
-if isinstance(real_images, tuple):
+if isinstance(real_images, tuple) or isinstance(real_images, list):
real_images = real_images[0]
# Sample random points in the latent space
batch_size = real_images.shape[0]
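For context on the change above: with PyTorch's default collate function, a `torch.utils.data.DataLoader` typically yields each batch as a plain Python list rather than a tuple, so the earlier tuple-only check let list batches fall through and the following `real_images.shape[0]` call failed, because lists have no `.shape` attribute. This is presumably the shape issue the commit title refers to. The sketch below is not part of the commit; the tensor shapes and names are illustrative only.

```python
# Illustrative sketch only (not part of this commit); dataset shapes are made up.
import torch

# Fake "images", batched the way a typical torch input pipeline would batch them.
images = torch.rand(64, 28, 28, 1)
dataset = torch.utils.data.TensorDataset(images, images)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)

batch = next(iter(dataloader))
print(type(batch))  # <class 'list'> with the default collate_fn

# A tuple-only check falls through for a list, and `batch.shape[0]` would then
# raise AttributeError, because Python lists have no `.shape` attribute.
if isinstance(batch, tuple) or isinstance(batch, list):
    real_images = batch[0]

print(real_images.shape[0])  # 32, the batch size used to sample latent points
```

An equivalent, more compact spelling of the updated condition would be `isinstance(real_images, (tuple, list))`; the diff keeps the two `isinstance` calls explicit.
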
guides/ipynb/custom_train_step_in_torch.ipynb (17 changes: 3 additions & 14 deletions)
@@ -10,7 +10,7 @@
"\n",
"**Author:** [fchollet](https://twitter.com/fchollet)<br>\n",
"**Date created:** 2023/06/27<br>\n",
"**Last modified:** 2023/06/27<br>\n",
"**Last modified:** 2024/08/01<br>\n",
"**Description:** Overriding the training step of the Model class with PyTorch."
]
},
@@ -50,17 +50,6 @@
"Let's see how that works."
]
},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {
-"colab_type": "code"
-},
-"outputs": [],
-"source": [
-"!pip install keras --upgrade --quiet"
-]
-},
{
"cell_type": "markdown",
"metadata": {
@@ -282,7 +271,7 @@
"outputs = keras.layers.Dense(1)(inputs)\n",
"model = CustomModel(inputs, outputs)\n",
"\n",
"# We don't passs a loss or metrics here.\n",
"# We don't pass a loss or metrics here.\n",
"model.compile(optimizer=\"adam\")\n",
"\n",
"# Just use `fit` as usual -- you can use callbacks, etc.\n",
@@ -531,7 +520,7 @@
"\n",
" def train_step(self, real_images):\n",
" device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
" if isinstance(real_images, tuple):\n",
" if isinstance(real_images, tuple) or isinstance(real_images, list):\n",
" real_images = real_images[0]\n",
" # Sample random points in the latent space\n",
" batch_size = real_images.shape[0]\n",
guides/md/custom_train_step_in_torch.md (55 changes: 33 additions & 22 deletions)
@@ -2,7 +2,7 @@

**Author:** [fchollet](https://twitter.com/fchollet)<br>
**Date created:** 2023/06/27<br>
-**Last modified:** 2023/06/27<br>
+**Last modified:** 2024/08/01<br>
**Description:** Overriding the training step of the Model class with PyTorch.


@@ -142,14 +142,14 @@ model.fit(x, y, epochs=3)

<div class="k-default-codeblock">
```
-Epoch 1/3
-32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 551us/step - mae: 0.6533 - loss: 0.6036
+Epoch 1/3
+32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - mae: 0.3410 - loss: 0.1772
Epoch 2/3
-32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 522us/step - mae: 0.4013 - loss: 0.2522
+32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - mae: 0.3336 - loss: 0.1695
Epoch 3/3
-32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 516us/step - mae: 0.3813 - loss: 0.2256
+32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - mae: 0.3170 - loss: 0.1511
-<keras.src.callbacks.history.History at 0x299b7baf0>
+<keras.src.callbacks.history.History at 0x7f48a3255710>
```
</div>
@@ -238,17 +238,17 @@ model.fit(x, y, epochs=5)
<div class="k-default-codeblock">
```
Epoch 1/5
-32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 461us/step - loss: 0.2470 - mae: 0.3953
+32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.6173 - mae: 0.6607
Epoch 2/5
-32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 456us/step - loss: 0.2386 - mae: 0.3910
+32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2340 - mae: 0.3883
Epoch 3/5
-32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 456us/step - loss: 0.2359 - mae: 0.3901
+32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1922 - mae: 0.3517
Epoch 4/5
-32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 480us/step - loss: 0.2013 - mae: 0.3572
+32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1802 - mae: 0.3411
Epoch 5/5
-32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 463us/step - loss: 0.1903 - mae: 0.3480
+32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 0.1862 - mae: 0.3505
-<keras.src.callbacks.history.History at 0x299c5eec0>
+<keras.src.callbacks.history.History at 0x7f48975ccbd0>
```
</div>
@@ -328,13 +328,13 @@ model.fit(x, y, sample_weight=sw, epochs=3)
<div class="k-default-codeblock">
```
Epoch 1/3
-32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 499us/step - mae: 1.4332 - loss: 1.0769
+32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - mae: 0.3216 - loss: 0.0827
Epoch 2/3
-32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 520us/step - mae: 0.9250 - loss: 0.5614
+32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - mae: 0.3156 - loss: 0.0803
Epoch 3/3
-32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 502us/step - mae: 0.6069 - loss: 0.2653
+32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - mae: 0.3085 - loss: 0.0760
-<keras.src.callbacks.history.History at 0x299c82bf0>
+<keras.src.callbacks.history.History at 0x7f48975d7bd0>
```
</div>
@@ -378,11 +378,23 @@ y = np.random.random((1000, 1))
model.evaluate(x, y)
```


+ 1/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - mae: 0.8706 - loss: 0.9344

<div class="k-default-codeblock">
```
-32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 325us/step - mae: 0.4427 - loss: 0.2993

```
</div>
+32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - mae: 0.8959 - loss: 0.9952



-[0.2726495862007141, 0.42286917567253113]


<div class="k-default-codeblock">
```
+[1.0077838897705078, 0.8984771370887756]
```
</div>
@@ -463,7 +475,7 @@ class GAN(keras.Model):

def train_step(self, real_images):
device = "cuda" if torch.cuda.is_available() else "cpu"
-if isinstance(real_images, tuple):
+if isinstance(real_images, tuple) or isinstance(real_images, list):
real_images = real_images[0]
# Sample random points in the latent space
batch_size = real_images.shape[0]
@@ -556,9 +568,8 @@ gan.fit(dataloader, epochs=1)

<div class="k-default-codeblock">
```
-1094/1094 ━━━━━━━━━━━━━━━━━━━━ 1582s 1s/step - d_loss: 0.3581 - g_loss: 2.0571
-<keras.src.callbacks.history.History at 0x299ce1840>
+1094/1094 ━━━━━━━━━━━━━━━━━━━━ 394s 360ms/step - d_loss: 0.2436 - g_loss: 4.7259
+<keras.src.callbacks.history.History at 0x7f489760a490>
```
</div>
