diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 11f0e73..9fd896b 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -60,6 +60,16 @@ jobs:
         run: |
           pytest tests --capture=no
 
+      - name: Log in to Docker Hub
+        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+
+      - name: Push image to DockerHub
+        run: |
+          docker push --all-tags
+
       - name: where am I
         run: |
           pwd
diff --git a/.gitignore b/.gitignore
index ba7c92c..bcd003b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,3 +5,4 @@ runs/
 *.zip
 catboost_info/
 .cache
+/build/
diff --git a/Dockerfile b/Dockerfile
index 5611114..15f7eb6 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -57,7 +57,7 @@ ENV PYTORCH_VERSION ${PYTORCH_VERSION}
 COPY ./ /workspace/
 WORKDIR /workspace/
 RUN /opt/mamba/bin/python -m pip install --upgrade pip && \
-    /opt/mamba/bin/python -m pip install -e .[cv,cv_classification,cv_semantic,cv_detection,nlp,nlp_retrieval,tabular,tabular_classification,dev] && \
+    /opt/mamba/bin/python -m pip install -e .[cv,cv_classification,cv_semantic,cv_detection,nlp,nlp_retrieval,ml,dev] && \
     /opt/mamba/bin/python -m pip install dvc dvc-gdrive && \
     /opt/mamba/bin/python -m pip install -U timm
diff --git a/theseus/base/metrics/confusion_matrix.py b/theseus/base/metrics/confusion_matrix.py
index c3cb04a..4efd820 100644
--- a/theseus/base/metrics/confusion_matrix.py
+++ b/theseus/base/metrics/confusion_matrix.py
@@ -24,7 +24,7 @@ def plot_cfm(cm, ax, labels: List):
     ax.set_xlabel("\nActual")
     ax.set_ylabel("Predicted ")
 
-    ax.xaxis.set_ticklabels(labels)
+    ax.xaxis.set_ticklabels(labels, rotation=90)
     ax.yaxis.set_ticklabels(labels, rotation=0)
diff --git a/theseus/cv/classification/callbacks/gradcam_callback.py b/theseus/cv/classification/callbacks/gradcam_callback.py
index 04e8ea8..9f9160c 100644
--- a/theseus/cv/classification/callbacks/gradcam_callback.py
+++ b/theseus/cv/classification/callbacks/gradcam_callback.py
@@ -65,17 +65,17 @@ def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
         # Vizualize Grad Class Activation Mapping and model predictions
         LOGGER.text("Visualizing model predictions...", level=LoggerObserver.DEBUG)
 
-        images = last_batch["inputs"]
+        images = last_batch["inputs"].cpu()
         targets = last_batch["targets"]
         model.eval()
 
         ## Calculate GradCAM and Grad Class Activation Mapping and
-        model_name = model.model.name
+        model_name = model.name
 
         try:
             grad_cam = CAMWrapper.get_method(
                 name="gradcam",
-                model=model.model.get_model(),
+                model=model.get_model(),
                 model_name=model_name,
                 use_cuda=next(model.parameters()).is_cuda,
             )
diff --git a/theseus/cv/classification/callbacks/visualize_callback.py b/theseus/cv/classification/callbacks/visualize_callback.py
index f3d084d..d63cd88 100644
--- a/theseus/cv/classification/callbacks/visualize_callback.py
+++ b/theseus/cv/classification/callbacks/visualize_callback.py
@@ -50,7 +50,6 @@ def on_sanity_check_start(
         trainloader = pl_module.datamodule.trainloader
         train_batch = next(iter(trainloader))
         val_batch = next(iter(valloader))
-
         try:
             self.visualize_model(model, train_batch)
         except TypeError as e:
@@ -59,16 +58,19 @@
     @torch.no_grad()
     def visualize_model(self, model, batch):
+
+        device = next(model.parameters()).device
+
         # Vizualize Model Graph
         LOGGER.text("Visualizing architecture...", level=LoggerObserver.DEBUG)
         LOGGER.log(
             [
                 {
                     "tag": "Sanitycheck/analysis/architecture",
-                    "value": model.model.get_model(),
+                    "value": model.get_model(),
                     "type": LoggerObserver.TORCH_MODULE,
                     "kwargs": {
-                        "inputs": move_to(batch["inputs"], model.device),
+                        "inputs": move_to(batch["inputs"], device),
                         "log_freq": 100,
                     },
                 }
@@ -82,7 +84,7 @@ def visualize_gt(self, train_batch, val_batch, iters):
         LOGGER.text("Visualizing dataset...", level=LoggerObserver.DEBUG)
 
         # Train batch
-        images = train_batch["inputs"]
+        images = train_batch["inputs"].cpu()
         batch = []
 
         for idx, inputs in enumerate(images):
@@ -107,7 +109,7 @@ def visualize_gt(self, train_batch, val_batch, iters):
         )
 
         # Validation batch
-        images = val_batch["inputs"]
+        images = val_batch["inputs"].cpu()
         batch = []
 
         for idx, inputs in enumerate(images):
@@ -162,12 +164,13 @@ def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
         # Vizualize model predictions
         LOGGER.text("Visualizing model predictions...", level=LoggerObserver.DEBUG)
 
-        images = last_batch["inputs"]
+        images = last_batch["inputs"].cpu()
         targets = last_batch["targets"]
         model.eval()
+        device = next(model.parameters()).device
 
         ## Get prediction on last batch
-        outputs = model.model.get_prediction(last_batch, device=model.device)
+        outputs = model.get_prediction(last_batch, device=device)
         label_indices = outputs["labels"]
         scores = outputs["confidences"]
diff --git a/theseus/cv/classification/metrics/errorcases.py b/theseus/cv/classification/metrics/errorcases.py
index 5a99bbe..333cbd2 100644
--- a/theseus/cv/classification/metrics/errorcases.py
+++ b/theseus/cv/classification/metrics/errorcases.py
@@ -53,12 +53,12 @@ def update(self, outputs: Dict[str, Any], batch: Dict[str, Any]):
             targets = targets.squeeze(-1).long()
 
         outputs = outputs.numpy().tolist()
-        targets = targets.numpy().tolist()
+        targets = targets.cpu().numpy().tolist()
         probs = probs.numpy().tolist()
 
         for (output, target, prob, image) in zip(outputs, targets, probs, images):
             if output != target:
-                self.images.append(image)
+                self.images.append(image.cpu())
                 self.preds.append(output)
                 self.targets.append(target)
                 self.probs.append(prob)