diff --git a/docs/normalization/group_norm/experiment.html b/docs/normalization/group_norm/experiment.html
index 7f678b40..7bb3018f 100644
--- a/docs/normalization/group_norm/experiment.html
+++ b/docs/normalization/group_norm/experiment.html
@@ -80,7 +80,7 @@
This derives from the generic VGG style architecture.
-class Model(Module):
+class Model(CIFAR10VGGModel):
Convolution, Normalization and Activation layers
+    def conv_block(self, in_channels, out_channels) -> nn.Module:
+        return nn.Sequential(
+            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
+            GroupNorm(self.groups, out_channels),
+            nn.ReLU(inplace=True),
+        )
     def __init__(self, groups: int = 32):
-        super().__init__()
-        layers = []
-        in_channels = 3
Number of groups, stored so that conv_block can use it
+        self.groups = groups
Number of channels in each layer in each block
-        for block in [[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]]:
-            for channels in block:
-                layers += [nn.Conv2d(in_channels, channels, kernel_size=3, padding=1),
-                           GroupNorm(groups, channels),
-                           nn.ReLU(inplace=True)]
-                in_channels = channels
-            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
+        super().__init__([[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]])
Create a sequential model with the layers
-        self.layers = nn.Sequential(*layers)
Final linear layer
-        self.fc = nn.Linear(512, 10)
-    def forward(self, x):
The VGG layers
-        x = self.layers(x)
Reshape for classification layer
-        x = x.view(x.shape[0], -1)
Final linear layer
-        return self.fc(x)
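The deleted loop has not vanished; it presumably moves into the CIFAR10VGGModel base class, which builds the layer stack from the channel specification by calling the conv_block hook that subclasses override. A minimal sketch of such a base class, under that assumption (the real implementation ships with labml_nn's CIFAR-10 helpers and may differ):

import torch.nn as nn

# Illustrative sketch of the assumed CIFAR10VGGModel base class,
# not the actual labml_nn code.
class CIFAR10VGGModel(nn.Module):
    def __init__(self, blocks):
        super().__init__()
        layers = []
        in_channels = 3
        # `blocks` gives the output channels of each conv layer in each block
        for block in blocks:
            for channels in block:
                layers.append(self.conv_block(in_channels, channels))
                in_channels = channels
            # Halve the spatial resolution between blocks
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        self.layers = nn.Sequential(*layers)
        # A 32x32 input is 1x1 after five pools, leaving 512 features
        self.fc = nn.Linear(512, 10)

    def conv_block(self, in_channels, out_channels) -> nn.Module:
        # Subclasses supply convolution + normalization + activation
        raise NotImplementedError

    def forward(self, x):
        x = self.layers(x)
        x = x.view(x.shape[0], -1)
        return self.fc(x)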
 class Configs(CIFAR10Configs):
Number of groups
     groups: int = 16
 @option(Configs.model)
 def model(c: Configs):
     return Model(c.groups).to(c.device)
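As a quick sanity check outside the config system, the model can also be constructed directly; the expected shape assumes CIFAR-10's 3x32x32 inputs and a forward pass like the base-class sketch above:

import torch

# Sanity check (assumes the sketched base-class semantics)
model = Model(groups=16)
logits = model(torch.randn(1, 3, 32, 32))
assert logits.shape == (1, 10)  # one logit per CIFAR-10 class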
 def main():
Create experiment
     experiment.create(name='cifar10', comment='group norm')
Create configurations
     conf = Configs()
Load configurations
     experiment.configs(conf, {
         'optimizer.optimizer': 'Adam',
         'optimizer.learning_rate': 2.5e-4,
     })
Start the experiment and run the training loop
     with experiment.start():
         conf.run()
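Since groups is an ordinary config value, running with a different group count only needs one more entry in the dictionary passed to experiment.configs; a sketch, assuming labml's usual override mechanism:

# Sketch: overriding the number of groups for a single run
experiment.configs(conf, {
    'optimizer.optimizer': 'Adam',
    'optimizer.learning_rate': 2.5e-4,
    'groups': 32,  # overrides Configs.groups for this run
})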
 if __name__ == '__main__':
     main()
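For reference, group normalization splits the channels into groups and normalizes each group over its (channels, height, width) slice per sample. The check below uses PyTorch's built-in nn.GroupNorm rather than the labml GroupNorm module that the experiment imports:

import torch
import torch.nn as nn

# Compare the built-in group norm with a manual computation
x = torch.randn(2, 64, 8, 8)
gn = nn.GroupNorm(num_groups=16, num_channels=64)  # weight=1, bias=0 at init

# Reshape into 16 groups of 4 channels and normalize within each group
xg = x.view(2, 16, 4, 8, 8)
mean = xg.mean(dim=(2, 3, 4), keepdim=True)
var = xg.var(dim=(2, 3, 4), unbiased=False, keepdim=True)
manual = ((xg - mean) / (var + gn.eps).sqrt()).view(2, 64, 8, 8)

assert torch.allclose(gn(x), manual, atol=1e-5)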