
Commit

fixed types
kozlov721 committed Sep 19, 2024
1 parent 98e1622 commit 3cd7c7a
Showing 3 changed files with 55 additions and 82 deletions.
luxonis_train/nodes/backbones/ddrnet/blocks.py (30 additions, 51 deletions)
@@ -6,8 +6,6 @@
 @license: U{https://github.com/Deci-AI/super-gradients/blob/master/LICENSE.md}
 """
 
-from typing import Type
-
 import torch
 from torch import Tensor, nn
 
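The dropped `from typing import Type` reflects PEP 585: on Python 3.9 and later the builtin `type` is subscriptable, so `type[nn.Module]` works with no import. A minimal sketch of the pattern, with an illustrative `build_blocks` helper that is not part of the repository:

    # PEP 585 (Python >= 3.9): builtin generics replace their typing.* aliases,
    # so `from typing import Type` becomes unnecessary.
    from torch import nn

    def build_blocks(block: type[nn.Module], num_blocks: int) -> nn.Sequential:
        """Stack fresh instances of `block` (illustrative helper)."""
        return nn.Sequential(*(block() for _ in range(num_blocks)))

    print(build_blocks(nn.ReLU, num_blocks=2))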
@@ -184,45 +182,45 @@ def forward(self, x: Tensor) -> Tensor:
         return out
 
 
-class BasicDDRBackBone(nn.Module):
+class BasicDDRBackbone(nn.Module):
     def __init__(
         self,
-        block: Type[nn.Module],
-        width: int,
+        block: type[nn.Module],
+        stem_channels: int,
         layers: list[int],
-        input_channels: int,
+        in_channels: int,
         layer3_repeats: int = 1,
     ):
         """Initialize the BasicDDRBackBone with specified parameters.
         @type block: Type[nn.Module]
         @param block: The block class to use for layers.
-        @type width: int
-        @param width: Width of the feature maps.
+        @type stem_channels: int
+        @param stem_channels: Number of output channels in the stem layer.
         @type layers: list[int]
         @param layers: Number of blocks in each layer.
-        @type input_channels: int
-        @param input_channels: Number of input channels.
+        @type in_channels: int
+        @param in_channels: Number of input channels.
         @type layer3_repeats: int
         @param layer3_repeats: Number of repeats for layer3. Defaults to
             1.
         """
         super().__init__()
-        self.input_channels = input_channels
+        self.input_channels = in_channels
 
         self.stem = nn.Sequential(
             ConvModule(
-                in_channels=input_channels,
-                out_channels=width,
+                in_channels=in_channels,
+                out_channels=stem_channels,
                 kernel_size=3,
                 stride=2,
                 padding=1,
                 bias=True,
                 activation=nn.ReLU(inplace=True),
             ),
             ConvModule(
-                in_channels=width,
-                out_channels=width,
+                in_channels=stem_channels,
+                out_channels=stem_channels,
                 kernel_size=3,
                 stride=2,
                 padding=1,
@@ -231,70 +229,51 @@ def __init__(
             ),
         )
 
-        self.layer1 = _make_layer(
+        self.layer1 = make_layer(
             block=block,
-            in_channels=width,
-            channels=width,
+            in_channels=stem_channels,
+            channels=stem_channels,
             num_blocks=layers[0],
         )
 
-        self.layer2 = _make_layer(
+        self.layer2 = make_layer(
             block=block,
-            in_channels=width,
-            channels=width * 2,
+            in_channels=stem_channels,
+            channels=stem_channels * 2,
             num_blocks=layers[1],
             stride=2,
         )
 
         self.layer3 = nn.ModuleList(
             [
-                _make_layer(
+                make_layer(
                     block=block,
-                    in_channels=width * 2,
-                    channels=width * 4,
+                    in_channels=stem_channels * 2,
+                    channels=stem_channels * 4,
                     num_blocks=layers[2],
                     stride=2,
                 )
             ]
             + [
-                _make_layer(
+                make_layer(
                     block=block,
-                    in_channels=width * 4,
-                    channels=width * 4,
+                    in_channels=stem_channels * 4,
+                    channels=stem_channels * 4,
                     num_blocks=layers[2],
                     stride=1,
                 )
                 for _ in range(layer3_repeats - 1)
             ]
         )
 
-        self.layer4 = _make_layer(
+        self.layer4 = make_layer(
             block=block,
-            in_channels=width * 4,
-            channels=width * 8,
+            in_channels=stem_channels * 4,
+            channels=stem_channels * 8,
             num_blocks=layers[3],
             stride=2,
         )
 
-    def validate_backbone_attributes(self) -> None:
-        """Validate the existence of required backbone attributes.
-        Ensures that the following attributes are present: "stem", "layer1", "layer2",
-        "layer3", "layer4", "input_channels".
-        """
-        expected_attributes = [
-            "stem",
-            "layer1",
-            "layer2",
-            "layer3",
-            "layer4",
-            "input_channels",
-        ]
-        for attribute in expected_attributes:
-            assert hasattr(
-                self, attribute
-            ), f"Invalid backbone - attribute '{attribute}' is missing"
-
     def get_backbone_output_number_of_channels(self) -> dict[str, int]:
         """Determine the number of output channels for each layer of the
         backbone.
@@ -321,8 +300,8 @@ def get_backbone_output_number_of_channels(self) -> dict[str, int]:
         return output_shapes
 
 
-def _make_layer(
-    block: Type[nn.Module],
+def make_layer(
+    block: type[nn.Module],
     in_channels: int,
     channels: int,
     num_blocks: int,
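Beyond the typing fixes, this file makes two structural changes: `_make_layer` loses its leading underscore, presumably because `ddrnet.py` imports it across modules and the underscore conventionally marks a name as module-private, and the assert-based `validate_backbone_attributes` method is deleted outright. A hedged sketch of why such a check is redundant once the attributes are assigned unconditionally (class and attribute names are illustrative):

    from torch import nn

    class Backbone(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            # The attributes are created unconditionally in __init__ ...
            self.stem = nn.Identity()
            self.layer1 = nn.Identity()

        def validate_attributes(self) -> None:
            # ... so a hasattr() assertion in the removed style can never
            # fail and only duplicates what a static type checker verifies.
            for attribute in ("stem", "layer1"):
                assert hasattr(self, attribute), f"missing '{attribute}'"

    Backbone().validate_attributes()  # passes trivially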
luxonis_train/nodes/backbones/ddrnet/ddrnet.py (21 additions, 30 deletions)
@@ -10,33 +10,32 @@
     UpscaleOnline,
 )
 
-from .blocks import (
-    DAPPM,
-    BasicDDRBackBone,
-    _make_layer,
-)
+from .blocks import DAPPM, BasicDDRBackbone, make_layer
 from .variants import get_variant
 
 
 class DDRNet(BaseNode[Tensor, list[Tensor]]):
+    in_channels: int
+
     def __init__(
         self,
         variant: Literal["23-slim", "23"] = "23-slim",
         channels: int | None = None,
         highres_channels: int | None = None,
         use_aux_heads: bool = True,
-        upscale_module: nn.Module = None,
+        upscale_module: nn.Module | None = None,
         spp_width: int = 128,
         ssp_inter_mode: str = "bilinear",
         segmentation_inter_mode: str = "bilinear",
         # TODO: nn.Module registry
         block: Type[nn.Module] = BasicResNetBlock,
         skip_block: Type[nn.Module] = BasicResNetBlock,
         layer5_block: Type[nn.Module] = Bottleneck,
         layer5_bottleneck_expansion: int = 2,
-        spp_kernel_sizes: list[int] = None,
-        spp_strides: list[int] = None,
+        spp_kernel_sizes: list[int] | None = None,
+        spp_strides: list[int] | None = None,
         layer3_repeats: int = 1,
-        layers: list[int] = None,
+        layers: list[int] | None = None,
         **kwargs,
     ):
         """DDRNet backbone.
@@ -96,18 +95,13 @@ def __init__(
         @type kwargs: Any
         @param kwargs: Additional arguments to pass to L{BaseNode}.
         """
-
-        if upscale_module is None:
-            upscale_module = UpscaleOnline()
-        if spp_kernel_sizes is None:
-            spp_kernel_sizes = [1, 5, 9, 17, 0]
-        if spp_strides is None:
-            spp_strides = [1, 2, 4, 8, 0]
-        if layers is None:
-            layers = [2, 2, 2, 2, 1, 2, 2, 1]
-
         super().__init__(**kwargs)
 
+        upscale_module = upscale_module or UpscaleOnline()
+        spp_kernel_sizes = spp_kernel_sizes or [1, 5, 9, 17, 0]
+        spp_strides = spp_strides or [1, 2, 4, 8, 0]
+        layers = layers or [2, 2, 2, 2, 1, 2, 2, 1]
+
         var = get_variant(variant)
 
         channels = channels or var.channels
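Replacing the `if arg is None:` blocks with `arg = arg or default` is more compact, with one caveat worth flagging as an observation rather than a claim about intent: `or` falls back on any falsy value, not just `None`, so an explicitly passed empty list is also replaced by the default. A minimal sketch of the behavior:

    from __future__ import annotations  # lets `list[int] | None` parse on 3.9

    def resolve_layers(layers: list[int] | None = None) -> list[int]:
        # `or` substitutes the default for None *and* any other falsy value.
        return layers or [2, 2, 2, 2, 1, 2, 2, 1]

    assert resolve_layers() == [2, 2, 2, 2, 1, 2, 2, 1]
    assert resolve_layers([3, 3]) == [3, 3]
    assert resolve_layers([]) == [2, 2, 2, 2, 1, 2, 2, 1]  # [] is falsy too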
@@ -117,8 +111,6 @@
         self.upscale = upscale_module
         self.ssp_inter_mode = ssp_inter_mode
         self.segmentation_inter_mode = segmentation_inter_mode
-        self.block = block
-        self.skip_block = skip_block
         self.relu = nn.ReLU(inplace=False)
         self.layer3_repeats = layer3_repeats
         self.channels = channels
@@ -128,14 +120,13 @@
             self.layers[4:],
         )
 
-        self._backbone = BasicDDRBackBone(
-            block=self.block,
-            width=self.channels,
+        self._backbone = BasicDDRBackbone(
+            block=block,
+            stem_channels=self.channels,
             layers=self.backbone_layers,
-            input_channels=self.in_channels,
+            in_channels=self.in_channels,
             layer3_repeats=self.layer3_repeats,
         )
-        self._backbone.validate_backbone_attributes()
         out_chan_backbone = (
             self._backbone.get_backbone_output_number_of_channels()
         )
@@ -166,7 +157,7 @@ def __init__(
                 )
             )
             self.layer3_skip.append(
-                _make_layer(
+                make_layer(
                     in_channels=out_chan_backbone["layer2"]
                     if i == 0
                     else highres_channels,
@@ -205,21 +196,21 @@
             ),
         )
 
-        self.layer4_skip = _make_layer(
+        self.layer4_skip = make_layer(
             block=skip_block,
             in_channels=highres_channels,
             channels=highres_channels,
             num_blocks=self.additional_layers[2],
         )
-        self.layer5_skip = _make_layer(
+        self.layer5_skip = make_layer(
             block=layer5_block,
             in_channels=highres_channels,
             channels=highres_channels,
             num_blocks=self.additional_layers[3],
             expansion=layer5_bottleneck_expansion,
         )
 
-        self.layer5 = _make_layer(
+        self.layer5 = make_layer(
             block=layer5_block,
             in_channels=out_chan_backbone["layer4"],
             channels=out_chan_backbone["layer4"],
luxonis_train/nodes/heads/ddrnet_segmentation_head.py (4 additions, 1 deletion)
@@ -13,6 +13,9 @@
 
 class DDRNetSegmentationHead(BaseNode[Tensor, Tensor]):
     attach_index: int = -1
+    in_height: int
+    in_width: int
+    in_channels: int
 
     tasks: list[LabelType] = [LabelType.SEGMENTATION]
 
@@ -104,4 +107,4 @@ def set_export_mode(self, mode: bool = True) -> None:
         if self.export and self.attach_index != -1:
             logger.info("Removing the auxiliary head.")
 
-            self.forward = lambda x: torch.tensor([])
+            self.forward = lambda inputs: torch.tensor([])

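The three bare annotations added to the head (`in_height: int`, `in_width: int`, `in_channels: int`) create no attributes at runtime; they only declare, for the type checker, names that are presumably populated by `BaseNode` from the input shape. A minimal sketch of the pattern under that assumption (both classes are illustrative, not the repository's API):

    class Base:
        """Stand-in for a base class that fills shape attributes at runtime."""

        def __init__(self, shape: tuple[int, int, int]) -> None:
            self.in_channels, self.in_height, self.in_width = shape

    class Head(Base):
        # Bare annotations: no runtime effect, but the type checker now
        # knows these attributes exist and are ints.
        in_height: int
        in_width: int
        in_channels: int

        def describe(self) -> str:
            return f"{self.in_channels}x{self.in_height}x{self.in_width}"

    print(Head((3, 224, 224)).describe())  # -> 3x224x224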