diff --git a/app/Home.py b/app/Home.py
index 14e658a..87cf56b 100644
--- a/app/Home.py
+++ b/app/Home.py
@@ -16,7 +16,7 @@
 # st.image("./data/logo/logo.png", width=300)
 DEFAULT_OBJECT = "water_level"
 
-st.markdown("# Mapa de Alagamentos | Vision AI")
+st.markdown("# Identificações | Vision AI")
 
 # get cameras
 cameras = get_cameras(
@@ -84,7 +84,6 @@
     "timestamp",
     "object",
     "label",
-    "label_explanation",
 ]
 selected_row = get_agrid_table(
     cameras_identifications_merged[selected_cols].reset_index()
@@ -113,6 +112,7 @@
 )
 
 with col1:
+    st.markdown("### 📍 Mapa")
     st_folium(folium_map, key="fig1", height=600, width="100%")
 
 # for camera_id in cameras_identifications_filter.index:
diff --git a/app/pages/Classificador de Labels.py b/app/pages/Classificador de Labels.py
index a74e884..a78d237 100644
--- a/app/pages/Classificador de Labels.py
+++ b/app/pages/Classificador de Labels.py
@@ -34,7 +34,7 @@ def get_translation(label):
         },
         {
             "object": "water_in_road",
-            "title": "Há água na via?",
+            "title": "Há indício de chuva?",
             "condition": "Se a resposta for 'Não', associe o rótulo ‘Baixa ou Indiferente’ à opção 3 e pule para 4.",  # noqa
             "explanation": " Inspeção visual para presença de água na pista, que pode variar desde uma leve umidade até condições de alagamento evidente.",  # noqa
             "labels": {
@@ -104,6 +104,14 @@ def get_translation(label):
     pd.DataFrame(data=snapshots), "snapshot_identification"
 )  # noqa
 
+objects_number = {
+    object_name: i + 1
+    for i, object_name in enumerate(
+        snapshots_objects["object"].unique().tolist()
+    )  # noqa
+}
+snapshots_objects["question_number"] = snapshots_objects["object"].map(objects_number)
+
 
 def put_selected_label(label, snapshots_options):
     snapshots_to_put = snapshots_options.to_dict()
@@ -162,13 +170,11 @@ def buttom(
 else:
     # Get the current image from the DataFrame
     row = snapshots_objects.iloc[st.session_state.row_index]  # noqa
-    st.write(
-        f"INDEX: {st.session_state.row_index +1} / {len(snapshots_objects)}"  # noqa
-    )  # noqa
     # Extract relevant information
     name = row["object"]
     translate_dict = get_translation(name)
     snapshot_url = row["snapshot_url"]
+    question_number = row["question_number"]
 
     labels_options = labels.loc[name]
     choices = labels_options["value"].tolist()
@@ -177,33 +183,31 @@ def buttom(
         choices = ["true", "false"]
 
     # st.write"
-    col1, col2, col3 = st.columns(3)
+    col1, col2 = st.columns(2)
     with col2:
         st.image(snapshot_url)
+
     with col1:
         st.markdown(
-            f"### {translate_dict.get('title')}",
+            f"### {question_number}. {translate_dict.get('title')}",
         )
         st.markdown(
             f"**Explicação:** {translate_dict.get('explanation')}",
         )
     # place labels in a grid of 2 columns
-    col1, col2, col3, col4, col5, col6 = st.columns(6)
     for i, label in enumerate(choices):
         label_translated = translate_dict.get("labels").get(label)
-
-        if i % 2 == 0:
-            with col3:
-                buttom(
-                    label=label,
-                    label_translated=label_translated,
-                    row=row,
-                )
-        else:
-            with col4:
-                buttom(
-                    label=label,
-                    label_translated=label_translated,
-                    row=row,
-                )
+        with col1:
+            buttom(
+                label=label,
+                label_translated=label_translated,
+                row=row,
+            )
+        # else:
+        #     with col4:
+        #         buttom(
+        #             label=label,
+        #             label_translated=label_translated,
+        #             row=row,
+        #         )
     st.session_state.row_index += 1  # noqa
diff --git a/app/pages/Visualizar Prompt.py b/app/pages/Visualizar Prompt.py
new file mode 100644
index 0000000..0ff7e90
--- /dev/null
+++ b/app/pages/Visualizar Prompt.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+import pandas as pd
+import streamlit as st
+from utils.utils import get_objects, get_objetcs_labels_df, get_prompts
+
+st.set_page_config(layout="wide", initial_sidebar_state="collapsed")
+# st.image("./data/logo/logo.png", width=300)
+
+st.markdown("# Visualizar Prompt | Vision AI")
+
+data = get_prompts()
+objects = pd.DataFrame(get_objects())
+labels = get_objetcs_labels_df(objects)
+
+prompt_parameters = data[0]
+prompt_text = prompt_parameters.get("prompt_text")
+prompt_objects = prompt_parameters.get("objects")
+
+selected_labels_cols = ["name", "criteria", "identification_guide", "value"]
+labels = labels[selected_labels_cols]
+labels = labels[labels["name"].isin(prompt_objects)]
+objects_table_md = labels.to_markdown(index=False)
+
+
+output_schema = """{\n "$defs": {\n "Object": {\n "properties": {\n "object": {\n "description": "The object from the objects table",\n "title": "Object",\n "type": "string"\n },\n "label_explanation": {\n "description": "Highly detailed visual description of the image given the object context",\n "title": "Label Explanation",\n "type": "string"\n },\n "label": {\n "anyOf": [\n {\n "type": "boolean"\n },\n {\n "type": "string"\n },\n {\n "type": "null"\n }\n ],\n "description": "Label indicating the condition or characteristic of the object",\n "title": "Label"\n }\n },\n "required": [\n "object",\n "label_explanation",\n "label"\n ],\n "title": "Object",\n "type": "object"\n }\n },\n "properties": {\n "objects": {\n "items": {\n "$ref": "#/$defs/Object"\n },\n "title": "Objects",\n "type": "array"\n }\n },\n "required": [\n "objects"\n ],\n "title": "Output",\n "type": "object"\n}\n"""  # noqa
+output_example = """{\n "objects": [\n {\n "object": "",\n "label_explanation": "",\n "label": ""\n }\n ]\n}\n"""  # noqa
+
+prompt_text = (
+    prompt_text.replace("{objects_table_md}", objects_table_md)
+    .replace("{output_schema}", output_schema)
+    .replace("{output_example}", output_example)
+)
+st.markdown(prompt_text)
diff --git a/app/utils/utils.py b/app/utils/utils.py
index c4c4581..bc39d34 100644
--- a/app/utils/utils.py
+++ b/app/utils/utils.py
@@ -54,12 +54,12 @@ def callback_data():
 #     )
 
 
-@st.cache_data(ttl=600 * 2, persist=False)
+@st.cache_data(ttl=60 * 5, persist=False)
 def get_cameras(
     only_active=True,
     use_mock_data=False,
     update_mock_data=False,
-    page_size=100,
+    page_size=3000,
     timeout=120,
 ):
     mock_data_path = "./data/temp/mock_api_data.json"
@@ -86,7 +86,7 @@
     return data
 
 
-@st.cache_data(ttl=600 * 2, persist=False)
+@st.cache_data(ttl=60 * 2, persist=False)
 def get_objects(
     page_size=100,
     timeout=120,
@@ -97,6 +97,17 @@ def get_objects(
     return data
 
 
+@st.cache_data(ttl=60 * 60, persist=False)
+def get_prompts(
+    page_size=100,
+    timeout=120,
+):
+    data = vision_api._get_all_pages(
+        path="/prompts", page_size=page_size, timeout=timeout
+    )
+    return data
+
+
 def treat_data(response):
     cameras_aux = pd.read_csv("./data/database/cameras_aux.csv", dtype=str)
     cameras_aux = cameras_aux.rename(columns={"id_camera": "id"}).set_index(
@@ -211,19 +222,24 @@ def display_camera_details(row, cameras_identifications):
 
     st.markdown(f"### 📷 Camera snapshot")  # noqa
     st.markdown(f"Endereço: {camera_name}")
-    # st.markdown(f"Data Snapshot: {snapshot_timestamp}")
+    st.markdown(f"Data Snapshot: {snapshot_timestamp}")
 
     # get cameras_attr url from selected row by id
     if image_url is None:
         st.markdown("Falha ao capturar o snapshot da câmera.")
     else:
         st.markdown(
-            f""" """,
+            f""" """,  # noqa
             unsafe_allow_html=True,
         )
+        st.markdown("<br><br><br><br>", unsafe_allow_html=True)
+
+    st.markdown(f"### 📃 Identificações")
 
     camera_identifications = cameras_identifications.loc[camera_id]  # noqa
-    get_agrid_table(table=camera_identifications.reset_index())
+    selected_cols = ["object", "label", "label_explanation"]
+
+    st.dataframe(camera_identifications[selected_cols].reset_index())
 
 
 def get_icon_color(label: Union[bool, None]):
diff --git a/poetry.lock b/poetry.lock
index 5477e99..11a9f2d 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1818,6 +1818,20 @@ files = [
 [package.dependencies]
 streamlit = ">=0.63"
 
+[[package]]
+name = "tabulate"
+version = "0.9.0"
+description = "Pretty-print tabular data"
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"},
+    {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"},
+]
+
+[package.extras]
+widechars = ["wcwidth"]
+
 [[package]]
 name = "tenacity"
 version = "8.2.3"
@@ -2008,4 +2022,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.10"
-content-hash = "69f3cecbac03a758c87cd0c84c6d8a0a65aa6f8d10b8d9db9dd82cf2f434333b"
+content-hash = "445c96f85889261bd38fb683577bd2295c370e0960834f2db5c8e03802f9b084"
diff --git a/pyproject.toml b/pyproject.toml
index 969867c..59ce351 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -14,6 +14,7 @@ streamlit-extras = "^0.3.5"
 streamlit-autorefresh = "^1.0.1"
 pillow = "^10.1.0"
 streamlit-aggrid = "0.3.4.post3"
+tabulate = "^0.9.0"
 
 [build-system]