Merge pull request #1 from andreped/dev
Started setting up the app + docker builds [no ci]
andreped authored May 26, 2023
2 parents abf9ee7 + b72a232 commit 5237ee9
Showing 11 changed files with 313 additions and 1 deletion.
1 change: 1 addition & 0 deletions .dockerignore
@@ -0,0 +1 @@
venv/
20 changes: 20 additions & 0 deletions .github/workflows/deploy.yml
@@ -0,0 +1,20 @@
name: Sync to Hugging Face hub
on:
  push:
    branches: [ main ]

  # to run this workflow manually from the Actions tab
  workflow_dispatch:

jobs:
  sync-to-hub:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          lfs: true
      - name: Push to hub
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: git push https://andreped:$HF_TOKEN@huggingface.co/spaces/andreped/neukit main
16 changes: 16 additions & 0 deletions .github/workflows/filesize.yml
@@ -0,0 +1,16 @@
name: Check file size
on:  # or directly `on: [push]` to run the action on every push on any branch
  pull_request:
    branches: [ main ]

  # to run this workflow manually from the Actions tab
  workflow_dispatch:

jobs:
  sync-to-hub:
    runs-on: ubuntu-latest
    steps:
      - name: Check large files
        uses: ActionsDesk/lfs-warning@v2.0
        with:
          filesizelimit: 10485760  # this is 10MB so we can sync to HF Spaces
1 change: 1 addition & 0 deletions .gitignore
@@ -0,0 +1 @@
venv/
70 changes: 70 additions & 0 deletions Dockerfile
@@ -0,0 +1,70 @@
# read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
# you will also find guides on how best to write your Dockerfile

# use Ubuntu 22.04 as the base image
FROM ubuntu:22.04

# set locale and encoding
ENV LANG=C.UTF-8 LC_ALL=C.UTF-8

# NOTE: the -y flag automatically answers yes to all prompts
# installing python3 with a specific version
RUN apt-get update -y
RUN apt-get upgrade -y
RUN apt install software-properties-common -y
RUN add-apt-repository ppa:deadsnakes/ppa -y
RUN apt update
RUN apt install python3.7 -y
RUN apt install python3.7-distutils -y
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.7 1

# installing other libraries
RUN apt-get install python3-pip -y && \
    apt-get -y install sudo
RUN apt-get install curl -y
RUN apt-get install nano -y
RUN apt-get update && apt-get install -y git
RUN apt-get install libblas-dev -y && apt-get install liblapack-dev -y
RUN apt-get install gfortran -y
RUN apt-get install libpng-dev -y
RUN apt-get install python3-dev -y
# RUN apt-get -y install cmake curl

WORKDIR /code

# install dependencies
COPY ./requirements.txt /code/requirements.txt
RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

# resolve dependency collision between tf==2.4 and gradio
RUN pip install --force-reinstall typing_extensions==4.0.0

# Install wget
RUN apt install wget -y && \
    apt install unzip -y

# Set up a new user named "user" with user ID 1000
RUN useradd -m -u 1000 user

# Switch to the "user" user
USER user

# Set home to the user's home directory
ENV HOME=/home/user \
PATH=/home/user/.local/bin:$PATH

# Set the working directory to the user's home directory
WORKDIR $HOME/app

# Copy the current directory contents into the container at $HOME/app setting the owner to the user
COPY --chown=user . $HOME/app

# Download pretrained meningioma model
RUN wget "https://github.com/raidionics/Raidionics-models/releases/download/1.2.0/Raidionics-MRI_Meningioma-ONNX-v12.zip" && \
    unzip "Raidionics-MRI_Meningioma-ONNX-v12.zip" && mkdir -p resources/models/ && mv MRI_Meningioma/ resources/models/MRI_Meningioma/

# Download test sample
RUN pip install gdown && gdown "https://drive.google.com/uc?id=1shjSrFjS4PHE5sTku30PZTLPZpGu24o3"

# CMD ["/bin/bash"]
CMD ["python3", "demo/app.py"]
29 changes: 28 additions & 1 deletion README.md
@@ -1 +1,28 @@
# neurokit
---
title: 'neukit: automatic meningioma segmentation from T1-weighted MRI'
colorFrom: indigo
colorTo: indigo
sdk: docker
app_port: 7860
emoji: 🔎
pinned: false
license: mit
app_file: app.py
---

# neukit

## Usage

The software will be made openly available on Hugging Face Spaces very soon. Stay tuned for more!

## Setup

For development, build the Docker image and run the app through it:

```
docker build -t neukit .
docker run -it -p 7860:7860 neukit
```

Then open `http://127.0.0.1:7860` in your favourite internet browser to view the demo.
19 changes: 19 additions & 0 deletions app.py
@@ -0,0 +1,19 @@
from neukit.gui import WebUI


def main():
    print("Launching demo...")

    # cwd = "/Users/andreped/workspace/livermask/"  # local testing -> macOS
    cwd = "/home/user/app/"  # production -> docker

    model_name = "model.h5"  # assumed to lie in `cwd` directory
    class_name = "parenchyma"

    # initialize and run app
    app = WebUI(model_name=model_name, class_name=class_name, cwd=cwd)
    app.run()


if __name__ == "__main__":
    main()
Empty file added neukit/__init__.py
Empty file.
94 changes: 94 additions & 0 deletions neukit/gui.py
@@ -0,0 +1,94 @@
import gradio as gr
from .utils import load_ct_to_numpy, load_pred_volume_to_numpy
from .compute import run_model
from .convert import nifti_to_glb


class WebUI:
    def __init__(self, model_name: str = None, class_name: str = None, cwd: str = None):
        # global states
        self.images = []
        self.pred_images = []

        # @TODO: This should be dynamically set based on chosen volume size
        self.nb_slider_items = 100

        self.model_name = model_name
        self.class_name = class_name
        self.cwd = cwd

        # define widgets that are not rendered immediately, but later on
        self.slider = gr.Slider(1, self.nb_slider_items, value=1, step=1, label="Which 2D slice to show")
        self.volume_renderer = gr.Model3D(
            clear_color=[0.0, 0.0, 0.0, 0.0],
            label="3D Model",
            visible=True
        ).style(height=512)

    def combine_ct_and_seg(self, img, pred):
        return (img, [(pred, self.class_name)])

    def upload_file(self, file):
        return file.name

    def load_mesh(self, mesh_file_name, model_name):
        path = mesh_file_name.name
        run_model(path, model_name)
        nifti_to_glb("prediction-livermask.nii")
        self.images = load_ct_to_numpy(path)
        self.pred_images = load_pred_volume_to_numpy("./prediction-livermask.nii")
        self.slider = self.slider.update(value=2)
        return "./prediction.obj"

    def get_img_pred_pair(self, k):
        k = int(k) - 1
        # show only the selected slice; keep the remaining widgets hidden
        out = [gr.AnnotatedImage.update(visible=False)] * self.nb_slider_items
        out[k] = gr.AnnotatedImage.update(self.combine_ct_and_seg(self.images[k], self.pred_images[k]), visible=True)
        return out

    def run(self):
        with gr.Blocks() as demo:

            with gr.Row().style(equal_height=True):
                file_output = gr.File(
                    file_types=[".nii", ".nii.gz"],
                    file_count="single"
                ).style(full_width=False, size="sm")
                file_output.upload(self.upload_file, file_output, file_output)

                run_btn = gr.Button("Run analysis").style(full_width=False, size="sm")
                run_btn.click(
                    fn=lambda x: self.load_mesh(x, model_name=self.cwd + self.model_name),
                    inputs=file_output,
                    outputs=self.volume_renderer
                )

            with gr.Row().style(equal_height=True):
                gr.Examples(
                    examples=[self.cwd + "test-volume.nii"],
                    inputs=file_output,
                    outputs=file_output,
                    fn=self.upload_file,
                    cache_examples=True,
                )

            with gr.Row().style(equal_height=True):
                with gr.Box():
                    image_boxes = []
                    for i in range(self.nb_slider_items):
                        visibility = (i == 1)
                        t = gr.AnnotatedImage(visible=visibility)\
                            .style(color_map={self.class_name: "#ffae00"}, height=512, width=512)
                        image_boxes.append(t)

                    self.slider.change(self.get_img_pred_pair, self.slider, image_boxes)

                with gr.Box():
                    self.volume_renderer.render()

            with gr.Row():
                self.slider.render()

            # sharing app publicly -> share=True: https://gradio.app/sharing-your-app/
            # inference times > 60 seconds -> need queue(): https://github.com/tloen/alpaca-lora/issues/60#issuecomment-1510006062
            demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=True)
61 changes: 61 additions & 0 deletions neukit/utils.py
@@ -0,0 +1,61 @@
import numpy as np
import nibabel as nib
from nibabel.processing import resample_to_output
from skimage.measure import marching_cubes


def load_ct_to_numpy(data_path):
    if not isinstance(data_path, str):
        data_path = data_path.name

    image = nib.load(data_path)
    data = image.get_fdata()

    data = np.rot90(data, k=1, axes=(0, 1))

    data[data < -150] = -150
    data[data > 250] = 250

    data = data - np.amin(data)
    data = data / np.amax(data) * 255
    data = data.astype("uint8")

    print(data.shape)
    return [data[..., i] for i in range(data.shape[-1])]


def load_pred_volume_to_numpy(data_path):
    if not isinstance(data_path, str):
        data_path = data_path.name

    image = nib.load(data_path)
    data = image.get_fdata()

    data = np.rot90(data, k=1, axes=(0, 1))

    data[data > 0] = 1
    data = data.astype("uint8")

    print(data.shape)
    return [data[..., i] for i in range(data.shape[-1])]


def nifti_to_glb(path, output="prediction.obj"):
    # load NIFTI into numpy array
    image = nib.load(path)
    resampled = resample_to_output(image, [1, 1, 1], order=1)
    data = resampled.get_fdata().astype("uint8")

    # extract surface
    verts, faces, normals, values = marching_cubes(data, 0)
    faces += 1

    with open(output, 'w') as thefile:
        for item in verts:
            thefile.write("v {0} {1} {2}\n".format(item[0], item[1], item[2]))

        for item in normals:
            thefile.write("vn {0} {1} {2}\n".format(item[0], item[1], item[2]))

        for item in faces:
            thefile.write("f {0}//{0} {1}//{1} {2}//{2}\n".format(item[0], item[1], item[2]))
3 changes: 3 additions & 0 deletions requirements.txt
@@ -0,0 +1,3 @@
raidionicsrads @ https://github.com/dbouget/raidionics_rads_lib/releases/download/v1.1.0/raidionicsrads-1.1.0-py3-none-manylinux1_x86_64.whl
onnxruntime-gpu==1.12.1
gradio==3.32.0
