From 19d53de7c70b6159fa886fc9bb599597aaae5ab3 Mon Sep 17 00:00:00 2001
From: KalyanMurapaka45 <kalyanmurapaka274@gmail.com>
Date: Thu, 21 Mar 2024 18:24:08 +0530
Subject: [PATCH] Model Files Update

---
 .github/workflows/aws.yaml                    |  57 +++
 app.py                                        |   6 +-
 .../GeminiMed/app.py                          |   2 +-
 .../GeminiMed => }/MedicineRecognition/app.py |   1 +
 src/gen_ai/.gitignore                         | 163 --------
 src/gen_ai/LICENSE                            |  21 -
 .../Language_Model/Instructions.txt           |   3 -
 .../NoteBook_Experiments/Research.ipynb       | 371 ------------------
 src/gen_ai/README.md                          | 114 ------
 src/gen_ai/app.py                             |  69 ----
 src/gen_ai/prompts.py                         |  44 ---
 src/gen_ai/requirements.txt                   |   8 -
 src/gen_ai/setup.py                           |  11 -
 src/gen_ai/src/__init__.py                    |   0
 src/gen_ai/src/helper.py                      |  21 -
 src/gen_ai/src/prompt.py                      |  10 -
 src/gen_ai/store_index.py                     |  28 --
 src/gen_ai/template.py                        |  38 --
 18 files changed, 62 insertions(+), 905 deletions(-)
 create mode 100644 .github/workflows/aws.yaml
 rename src/{gen_ai/NoteBook_Experiments => }/GeminiMed/app.py (96%)
 rename src/{gen_ai/NoteBook_Experiments/GeminiMed => }/MedicineRecognition/app.py (94%)
 delete mode 100644 src/gen_ai/.gitignore
 delete mode 100644 src/gen_ai/LICENSE
 delete mode 100644 src/gen_ai/NoteBook_Experiments/Language_Model/Instructions.txt
 delete mode 100644 src/gen_ai/NoteBook_Experiments/Research.ipynb
 delete mode 100644 src/gen_ai/README.md
 delete mode 100644 src/gen_ai/app.py
 delete mode 100644 src/gen_ai/prompts.py
 delete mode 100644 src/gen_ai/requirements.txt
 delete mode 100644 src/gen_ai/setup.py
 delete mode 100644 src/gen_ai/src/__init__.py
 delete mode 100644 src/gen_ai/src/helper.py
 delete mode 100644 src/gen_ai/src/prompt.py
 delete mode 100644 src/gen_ai/store_index.py
 delete mode 100644 src/gen_ai/template.py

diff --git a/.github/workflows/aws.yaml b/.github/workflows/aws.yaml
new file mode 100644
index 0000000..5f02785
--- /dev/null
+++ b/.github/workflows/aws.yaml
@@ -0,0 +1,57 @@
+name: Deploy Application Docker Image to EC2 instance
+
+on:
+  push:
+    branches: [main]
+
+jobs:
+  Continuous-Integration:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v1
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: ${{ secrets.AWS_DEFAULT_REGION }}
+
+      - name: Login to Amazon ECR
+        id: login-ecr
+        uses: aws-actions/amazon-ecr-login@v1
+
+      - name: Build, tag, and push image to Amazon ECR
+        id: build-image
+        env:
+          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
+          ECR_REPOSITORY: ${{ secrets.ECR_REPO }}
+          IMAGE_TAG: latest
+        run: |
+          docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
+          docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
+          echo "image=$ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG" >> "$GITHUB_OUTPUT"
+
+  Continuous-Deployment:
+    needs: Continuous-Integration
+    runs-on: self-hosted
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v1
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: ${{ secrets.AWS_DEFAULT_REGION }}
+
+      - name: Login to Amazon ECR
+        id: login-ecr
+        uses: aws-actions/amazon-ecr-login@v1
+
+      - name: Run Docker Image to serve users
+        run: |
+          docker run -d -e AWS_ACCESS_KEY_ID="${{ secrets.AWS_ACCESS_KEY_ID }}" -e AWS_SECRET_ACCESS_KEY="${{ secrets.AWS_SECRET_ACCESS_KEY }}" -e AWS_DEFAULT_REGION="${{ secrets.AWS_DEFAULT_REGION }}" -e MONGODB_URL="${{ secrets.MONGODB_URL }}" -p 8080:8080 "${{ steps.login-ecr.outputs.registry }}"/"${{ secrets.ECR_REPO }}":latest
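
Note on the deployment step above: the job starts a fresh detached container on every push but never stops or removes the previous one, so a second deploy to the same self-hosted instance can fail because the `-p 8080:8080` binding is already taken (naming the container and running `docker rm -f <name>` first would avoid this). The two jobs also pin different checkout versions (v2 and v3); aligning them avoids drift. After a deploy, a minimal smoke-test sketch in Python, assuming the app answers plain HTTP on the mapped port 8080:

```python
# Post-deploy smoke test (sketch): poll the container's HTTP port until it
# answers. Host and port are assumed from the workflow's -p 8080:8080 mapping.
import time
import urllib.request

def wait_for_service(url: str = "http://localhost:8080/", timeout: int = 60) -> bool:
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with urllib.request.urlopen(url, timeout=5) as resp:
                if resp.status < 500:
                    return True
        except OSError:
            time.sleep(2)  # the container may still be starting
    return False

if __name__ == "__main__":
    assert wait_for_service(), "service did not come up on :8080"
```
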
diff --git a/app.py b/app.py
index 2536ea4..22280ce 100644
--- a/app.py
+++ b/app.py
@@ -19,7 +19,7 @@
 kidney_model = load_model('Artifacts\Kidney_Disease\Kidney_Model.h5')
 #lung_model = load_model('Artifacts\Lung_Disease\Lung_Model.h5')
 livermodel = pickle.load(open('Artifacts\Liver_Disease\Liver_Model.pkl', 'rb'))
-liverpreprocessor = pickle.load(open('Artifacts\Liver_Disease\Liver_Preprocessor.pkl', 'rb'))
+#liverpreprocessor = pickle.load(open('Artifacts\Liver_Disease\Liver_Preprocessor.pkl', 'rb'))
 
 
 
@@ -264,8 +264,8 @@ def liver():
                             albumin, albumin_globulin_ratio]).reshape(1, -1)
         
         # Make prediction - (livermodel and liverpreprocessor assumed to be defined elsewhere)
-        prediction = livermodel.predict(liverpreprocessor.transform(features))[0]
-        probability = livermodel.predict_proba(liverpreprocessor.transform(features))[0][1]
+        prediction = livermodel.predict(features)[0]
+        probability = livermodel.predict_proba(features)[0][1]
         
         # Prepare response
         if prediction == 1:
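
With the preprocessor load commented out, the liver route now feeds raw features straight into `livermodel.predict`. That is only correct if `Liver_Model.pkl` already bundles its preprocessing; otherwise predictions will silently degrade. Note also that the Windows-style backslash paths (`'Artifacts\Liver_Disease\...'`) will not resolve inside the Linux container this workflow deploys to, while forward slashes work on both platforms. A sketch (scikit-learn assumed; the estimator and training data below are illustrative placeholders) of exporting a single pipeline artifact so the route can stay preprocessor-free:

```python
# Sketch: persist preprocessing + classifier as one pipeline so app.py can
# call predict() on raw features. scikit-learn is assumed; the estimator and
# the synthetic data are stand-ins for the project's real training setup.
import pickle
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression

X_train = np.random.rand(200, 10)             # stand-in for the liver dataset
y_train = np.random.randint(0, 2, size=200)

pipeline = Pipeline([
    ("preprocess", StandardScaler()),         # replaces Liver_Preprocessor.pkl
    ("model", LogisticRegression()),          # replaces the bare classifier
])
pipeline.fit(X_train, y_train)

# Forward slashes keep the path valid on both Windows and Linux.
with open("Artifacts/Liver_Disease/Liver_Model.pkl", "wb") as f:
    pickle.dump(pipeline, f)
```
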
diff --git a/src/gen_ai/NoteBook_Experiments/GeminiMed/app.py b/src/GeminiMed/app.py
similarity index 96%
rename from src/gen_ai/NoteBook_Experiments/GeminiMed/app.py
rename to src/GeminiMed/app.py
index a960dd2..56670f9 100644
--- a/src/gen_ai/NoteBook_Experiments/GeminiMed/app.py
+++ b/src/GeminiMed/app.py
@@ -22,7 +22,7 @@ def get_gemini_response(question):
     response = model.generate_content(question)
     return response.text
 
-st.title('GeminiMed: GPT-4.5')
+st.title('OpenHealth: GPT-4.5')
 
 user_input = st.text_input("Enter Your Medical Query Here:")
 
diff --git a/src/gen_ai/NoteBook_Experiments/GeminiMed/MedicineRecognition/app.py b/src/MedicineRecognition/app.py
similarity index 94%
rename from src/gen_ai/NoteBook_Experiments/GeminiMed/MedicineRecognition/app.py
rename to src/MedicineRecognition/app.py
index 7b6a4ce..b0d73e0 100644
--- a/src/gen_ai/NoteBook_Experiments/GeminiMed/MedicineRecognition/app.py
+++ b/src/MedicineRecognition/app.py
@@ -28,5 +28,6 @@ def validate(validation_prompt):
 uploaded_file = st.file_uploader("Upload an image", type=['jpg', 'png', 'jpeg'])
 if uploaded_file is not None:
     image = Image.open(uploaded_file)
+    st.image(image, caption='Uploaded Image.', use_column_width=True)
     response_text = gen_image(question, image)
     st.write(response_text)
\ No newline at end of file
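
Both renamed apps lean on the google-generativeai client that these hunks only show in fragments. For orientation, a minimal sketch of the pieces the diffs assume: the text helper visible in the GeminiMed hunk, and a hypothetical `gen_image` matching the vision call in MedicineRecognition (model names and the API-key environment variable are assumptions, not shown in the patch):

```python
import os
from PIL import Image
import google.generativeai as genai

genai.configure(api_key=os.environ["GOOGLE_API_KEY"])  # assumed env var

def get_gemini_response(question: str) -> str:
    # Text path used by src/GeminiMed/app.py (model name assumed).
    model = genai.GenerativeModel("gemini-pro")
    return model.generate_content(question).text

def gen_image(question: str, image: Image.Image) -> str:
    # Hypothetical vision helper matching the call in MedicineRecognition/app.py.
    model = genai.GenerativeModel("gemini-pro-vision")
    return model.generate_content([question, image]).text
```
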
diff --git a/src/gen_ai/.gitignore b/src/gen_ai/.gitignore
deleted file mode 100644
index 95a2861..0000000
--- a/src/gen_ai/.gitignore
+++ /dev/null
@@ -1,163 +0,0 @@
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-share/python-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-MANIFEST
-
-# PyInstaller
-#  Usually these files are written by a python script from a template
-#  before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.nox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-*.py,cover
-.hypothesis/
-.pytest_cache/
-cover/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-db.sqlite3
-db.sqlite3-journal
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-.pybuilder/
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# IPython
-profile_default/
-ipython_config.py
-llama-2-7b-chat.ggmlv3.q4_0.bin
-
-# pyenv
-#   For a library or package, you might want to ignore these files since the code is
-#   intended to run in multiple environments; otherwise, check them in:
-# .python-version
-
-# pipenv
-#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
-#   However, in case of collaboration, if having platform-specific dependencies or dependencies
-#   having no cross-platform support, pipenv may install dependencies that don't work, or not
-#   install all needed dependencies.
-#Pipfile.lock
-
-# poetry
-#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
-#   This is especially recommended for binary packages to ensure reproducibility, and is more
-#   commonly ignored for libraries.
-#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
-#poetry.lock
-
-# pdm
-#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
-#pdm.lock
-#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
-#   in version control.
-#   https://pdm.fming.dev/#use-with-ide
-.pdm.toml
-
-# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
-__pypackages__/
-
-# Celery stuff
-celerybeat-schedule
-celerybeat.pid
-
-# SageMath parsed files
-*.sage.py
-
-# Environments
-.env
-.venv
-env/
-venv/
-ENV/
-env.bak/
-venv.bak/
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-
-# Pyre type checker
-.pyre/
-
-# pytype static type analyzer
-.pytype/
-
-# Cython debug symbols
-cython_debug/
-
-# PyCharm
-#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
-#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
-#  and can be added to the global gitignore or merged into this file.  For a more nuclear
-#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
-#.idea/
-
-model/llama-2-7b-chat.ggmlv3.q4_0.bin
diff --git a/src/gen_ai/LICENSE b/src/gen_ai/LICENSE
deleted file mode 100644
index 93bc26f..0000000
--- a/src/gen_ai/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2023 BAPPY AHMED
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/src/gen_ai/NoteBook_Experiments/Language_Model/Instructions.txt b/src/gen_ai/NoteBook_Experiments/Language_Model/Instructions.txt
deleted file mode 100644
index 36832bf..0000000
--- a/src/gen_ai/NoteBook_Experiments/Language_Model/Instructions.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-Model: llama-2-7b-chat.ggmlv3.q4_0.bin
-
-URL: https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/tree/main
\ No newline at end of file
diff --git a/src/gen_ai/NoteBook_Experiments/Research.ipynb b/src/gen_ai/NoteBook_Experiments/Research.ipynb
deleted file mode 100644
index 6fab6e4..0000000
--- a/src/gen_ai/NoteBook_Experiments/Research.ipynb
+++ /dev/null
@@ -1,371 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": 28,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "OK!\n"
-     ]
-    }
-   ],
-   "source": [
-    "print(\"OK!\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 29,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from langchain import PromptTemplate\n",
-    "from langchain.chains import RetrievalQA\n",
-    "from langchain.embeddings import HuggingFaceEmbeddings\n",
-    "from langchain.vectorstores import Pinecone\n",
-    "import pinecone\n",
-    "from langchain.document_loaders import PyPDFLoader, DirectoryLoader\n",
-    "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
-    "from langchain.prompts import PromptTemplate\n",
-    "from langchain.llms import CTransformers\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 30,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "PINECONE_API_KEY = \"d520cf27-507d-407b-87dd-6f68aae7ac30\"\n",
-    "\n",
-    "PINECONE_API_ENV = \"gcp-starter\""
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 31,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "#Extract data from the PDF\n",
-    "def load_pdf(data):\n",
-    "    loader = DirectoryLoader(data,\n",
-    "                    glob=\"*.pdf\",\n",
-    "                    loader_cls=PyPDFLoader)\n",
-    "    \n",
-    "    documents = loader.load()\n",
-    "\n",
-    "    return documents"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 32,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "extracted_data = load_pdf(\"../data/\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 33,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# extracted_data"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 34,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "#Create text chunks\n",
-    "def text_split(extracted_data):\n",
-    "    text_splitter = RecursiveCharacterTextSplitter(chunk_size = 500, chunk_overlap = 20)\n",
-    "    text_chunks = text_splitter.split_documents(extracted_data)\n",
-    "\n",
-    "    return text_chunks"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 35,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "length of my chunk: 7020\n"
-     ]
-    }
-   ],
-   "source": [
-    "text_chunks = text_split(extracted_data)\n",
-    "print(\"length of my chunk:\", len(text_chunks))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 36,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# text_chunks"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 37,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "#download embedding model\n",
-    "def download_hugging_face_embeddings():\n",
-    "    embeddings = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-MiniLM-L6-v2\")\n",
-    "    return embeddings"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 38,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "embeddings = download_hugging_face_embeddings()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 39,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "HuggingFaceEmbeddings(client=SentenceTransformer(\n",
-       "  (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: BertModel \n",
-       "  (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})\n",
-       "  (2): Normalize()\n",
-       "), model_name='sentence-transformers/all-MiniLM-L6-v2', cache_folder=None, model_kwargs={}, encode_kwargs={})"
-      ]
-     },
-     "execution_count": 39,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "embeddings"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 40,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Length 384\n"
-     ]
-    }
-   ],
-   "source": [
-    "query_result = embeddings.embed_query(\"Hello world\")\n",
-    "print(\"Length\", len(query_result))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 41,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# query_result"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 52,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "#Initializing the Pinecone\n",
-    "pinecone.init(api_key=PINECONE_API_KEY,environment=PINECONE_API_ENV)\n",
-    "\n",
-    "index_name=\"medchat\"\n",
-    "\n",
-    "#Creating Embeddings for Each of The Text Chunks & storing\n",
-    "docsearch=Pinecone.from_texts([t.page_content for t in text_chunks], embeddings, index_name=index_name)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 53,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Result [Document(page_content=\"GALE ENCYCLOPEDIA OF MEDICINE 2 117Allergies\\nAllergic rhinitis is commonly triggered by\\nexposure to household dust, animal fur,or pollen. The foreign substance thattriggers an allergic reaction is calledan allergen.\\nThe presence of an allergen causes the\\nbody's lymphocytes to begin producingIgE antibodies. The lymphocytes of an allergy sufferer produce an unusuallylarge amount of IgE.\\nIgE molecules attach to mast\\ncells, which contain histamine.HistaminePollen grains\\nLymphocyte\\nFIRST EXPOSURE\", metadata={}), Document(page_content='allergens are the following:\\n• plant pollens\\n• animal fur and dander\\n• body parts from house mites (microscopic creatures\\nfound in all houses)\\n• house dust• mold spores• cigarette smoke• solvents• cleaners\\nCommon food allergens include the following:\\n• nuts, especially peanuts, walnuts, and brazil nuts\\n• fish, mollusks, and shellfish• eggs• wheat• milk• food additives and preservatives\\nThe following types of drugs commonly cause aller-\\ngic reactions:\\n• penicillin or other antibiotics', metadata={}), Document(page_content='itchy, scratchy nose, eyes, and throat common in aller-gic rhinitis.\\nThe number of possible airborne allergens is enor-', metadata={})]\n"
-     ]
-    }
-   ],
-   "source": [
-    "#If we already have an index we can load it like this\n",
-    "docsearch=Pinecone.from_existing_index(index_name, embeddings)\n",
-    "\n",
-    "query = \"What are Allergies\"\n",
-    "\n",
-    "docs=docsearch.similarity_search(query, k=3)\n",
-    "\n",
-    "print(\"Result\", docs)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 54,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "prompt_template=\"\"\"\n",
-    "Use the following pieces of information to answer the user's question.\n",
-    "If you don't know the answer, just say that you don't know, don't try to make up an answer.\n",
-    "\n",
-    "Context: {context}\n",
-    "Question: {question}\n",
-    "\n",
-    "Only return the helpful answer below and nothing else.\n",
-    "Helpful answer:\n",
-    "\"\"\""
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 55,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "PROMPT=PromptTemplate(template=prompt_template, input_variables=[\"context\", \"question\"])\n",
-    "chain_type_kwargs={\"prompt\": PROMPT}"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 56,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "llm=CTransformers(model=\"../model\\llama-2-7b-chat.ggmlv3.q4_0.bin\",\n",
-    "                  model_type=\"llama\",\n",
-    "                  config={'max_new_tokens':512,\n",
-    "                          'temperature':0.8})"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 57,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "qa=RetrievalQA.from_chain_type(\n",
-    "    llm=llm, \n",
-    "    chain_type=\"stuff\", \n",
-    "    retriever=docsearch.as_retriever(search_kwargs={'k': 2}),\n",
-    "    return_source_documents=True, \n",
-    "    chain_type_kwargs=chain_type_kwargs)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 58,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Response :  Allergies occur when your body's immune system mistakenly identifies a harmless substance, such as pollen, dust, or a specific food, as a threat. When this substance enters your body, your immune system produces antibodies to fight it off, leading to the release of chemical mediators, such as histamine, which cause allergic symptoms. The most common allergy triggers include pollen, dust mites, mold, pet dander, and certain foods, such as nuts, fish, shellfish, eggs, milk, and wheat.\n"
-     ]
-    },
-    {
-     "ename": "KeyboardInterrupt",
-     "evalue": "",
-     "output_type": "error",
-     "traceback": [
-      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
-      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
-      "Cell \u001b[1;32mIn[58], line 3\u001b[0m\n\u001b[0;32m      1\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[0;32m      2\u001b[0m     user_input\u001b[38;5;241m=\u001b[39m\u001b[38;5;28minput\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mInput Prompt:\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m----> 3\u001b[0m     result\u001b[38;5;241m=\u001b[39m\u001b[43mqa\u001b[49m\u001b[43m(\u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mquery\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43muser_input\u001b[49m\u001b[43m}\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m      4\u001b[0m     \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mResponse : \u001b[39m\u001b[38;5;124m\"\u001b[39m, result[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mresult\u001b[39m\u001b[38;5;124m\"\u001b[39m])\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\langchain\\chains\\base.py:181\u001b[0m, in \u001b[0;36mChain.__call__\u001b[1;34m(self, inputs, return_only_outputs, callbacks, tags, metadata, include_run_info)\u001b[0m\n\u001b[0;32m    179\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m (\u001b[38;5;167;01mKeyboardInterrupt\u001b[39;00m, \u001b[38;5;167;01mException\u001b[39;00m) \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m    180\u001b[0m     run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n\u001b[1;32m--> 181\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[0;32m    182\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_end(outputs)\n\u001b[0;32m    183\u001b[0m final_outputs: Dict[\u001b[38;5;28mstr\u001b[39m, Any] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mprep_outputs(\n\u001b[0;32m    184\u001b[0m     inputs, outputs, return_only_outputs\n\u001b[0;32m    185\u001b[0m )\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\langchain\\chains\\base.py:175\u001b[0m, in \u001b[0;36mChain.__call__\u001b[1;34m(self, inputs, return_only_outputs, callbacks, tags, metadata, include_run_info)\u001b[0m\n\u001b[0;32m    169\u001b[0m run_manager \u001b[38;5;241m=\u001b[39m callback_manager\u001b[38;5;241m.\u001b[39mon_chain_start(\n\u001b[0;32m    170\u001b[0m     dumpd(\u001b[38;5;28mself\u001b[39m),\n\u001b[0;32m    171\u001b[0m     inputs,\n\u001b[0;32m    172\u001b[0m )\n\u001b[0;32m    173\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m    174\u001b[0m     outputs \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m--> 175\u001b[0m         \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    176\u001b[0m         \u001b[38;5;28;01mif\u001b[39;00m new_arg_supported\n\u001b[0;32m    177\u001b[0m         \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_call(inputs)\n\u001b[0;32m    178\u001b[0m     )\n\u001b[0;32m    179\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m (\u001b[38;5;167;01mKeyboardInterrupt\u001b[39;00m, \u001b[38;5;167;01mException\u001b[39;00m) \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m    180\u001b[0m     run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\langchain\\chains\\retrieval_qa\\base.py:131\u001b[0m, in \u001b[0;36mBaseRetrievalQA._call\u001b[1;34m(self, inputs, run_manager)\u001b[0m\n\u001b[0;32m    129\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m    130\u001b[0m     docs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_get_docs(question)  \u001b[38;5;66;03m# type: ignore[call-arg]\u001b[39;00m\n\u001b[1;32m--> 131\u001b[0m answer \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcombine_documents_chain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    132\u001b[0m \u001b[43m    \u001b[49m\u001b[43minput_documents\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdocs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mquestion\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mquestion\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_run_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_child\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    133\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    135\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mreturn_source_documents:\n\u001b[0;32m    136\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m {\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39moutput_key: answer, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msource_documents\u001b[39m\u001b[38;5;124m\"\u001b[39m: docs}\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\langchain\\chains\\base.py:320\u001b[0m, in \u001b[0;36mChain.run\u001b[1;34m(self, callbacks, tags, metadata, *args, **kwargs)\u001b[0m\n\u001b[0;32m    315\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m(args[\u001b[38;5;241m0\u001b[39m], callbacks\u001b[38;5;241m=\u001b[39mcallbacks, tags\u001b[38;5;241m=\u001b[39mtags, metadata\u001b[38;5;241m=\u001b[39mmetadata)[\n\u001b[0;32m    316\u001b[0m         _output_key\n\u001b[0;32m    317\u001b[0m     ]\n\u001b[0;32m    319\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m kwargs \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m args:\n\u001b[1;32m--> 320\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcallbacks\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtags\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtags\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmetadata\u001b[49m\u001b[43m)\u001b[49m[\n\u001b[0;32m    321\u001b[0m         _output_key\n\u001b[0;32m    322\u001b[0m     ]\n\u001b[0;32m    324\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m kwargs \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m args:\n\u001b[0;32m    325\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[0;32m    326\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m`run` supported with either positional arguments or keyword arguments,\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    327\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m but none were provided.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    328\u001b[0m     )\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\langchain\\chains\\base.py:181\u001b[0m, in \u001b[0;36mChain.__call__\u001b[1;34m(self, inputs, return_only_outputs, callbacks, tags, metadata, include_run_info)\u001b[0m\n\u001b[0;32m    179\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m (\u001b[38;5;167;01mKeyboardInterrupt\u001b[39;00m, \u001b[38;5;167;01mException\u001b[39;00m) \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m    180\u001b[0m     run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n\u001b[1;32m--> 181\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[0;32m    182\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_end(outputs)\n\u001b[0;32m    183\u001b[0m final_outputs: Dict[\u001b[38;5;28mstr\u001b[39m, Any] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mprep_outputs(\n\u001b[0;32m    184\u001b[0m     inputs, outputs, return_only_outputs\n\u001b[0;32m    185\u001b[0m )\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\langchain\\chains\\base.py:175\u001b[0m, in \u001b[0;36mChain.__call__\u001b[1;34m(self, inputs, return_only_outputs, callbacks, tags, metadata, include_run_info)\u001b[0m\n\u001b[0;32m    169\u001b[0m run_manager \u001b[38;5;241m=\u001b[39m callback_manager\u001b[38;5;241m.\u001b[39mon_chain_start(\n\u001b[0;32m    170\u001b[0m     dumpd(\u001b[38;5;28mself\u001b[39m),\n\u001b[0;32m    171\u001b[0m     inputs,\n\u001b[0;32m    172\u001b[0m )\n\u001b[0;32m    173\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m    174\u001b[0m     outputs \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m--> 175\u001b[0m         \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    176\u001b[0m         \u001b[38;5;28;01mif\u001b[39;00m new_arg_supported\n\u001b[0;32m    177\u001b[0m         \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_call(inputs)\n\u001b[0;32m    178\u001b[0m     )\n\u001b[0;32m    179\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m (\u001b[38;5;167;01mKeyboardInterrupt\u001b[39;00m, \u001b[38;5;167;01mException\u001b[39;00m) \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m    180\u001b[0m     run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\langchain\\chains\\combine_documents\\base.py:106\u001b[0m, in \u001b[0;36mBaseCombineDocumentsChain._call\u001b[1;34m(self, inputs, run_manager)\u001b[0m\n\u001b[0;32m    104\u001b[0m \u001b[38;5;66;03m# Other keys are assumed to be needed for LLM prediction\u001b[39;00m\n\u001b[0;32m    105\u001b[0m other_keys \u001b[38;5;241m=\u001b[39m {k: v \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m inputs\u001b[38;5;241m.\u001b[39mitems() \u001b[38;5;28;01mif\u001b[39;00m k \u001b[38;5;241m!=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minput_key}\n\u001b[1;32m--> 106\u001b[0m output, extra_return_dict \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcombine_docs(\n\u001b[0;32m    107\u001b[0m     docs, callbacks\u001b[38;5;241m=\u001b[39m_run_manager\u001b[38;5;241m.\u001b[39mget_child(), \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mother_keys\n\u001b[0;32m    108\u001b[0m )\n\u001b[0;32m    109\u001b[0m extra_return_dict[\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39moutput_key] \u001b[38;5;241m=\u001b[39m output\n\u001b[0;32m    110\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m extra_return_dict\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\langchain\\chains\\combine_documents\\stuff.py:165\u001b[0m, in \u001b[0;36mStuffDocumentsChain.combine_docs\u001b[1;34m(self, docs, callbacks, **kwargs)\u001b[0m\n\u001b[0;32m    163\u001b[0m inputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_get_inputs(docs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m    164\u001b[0m \u001b[38;5;66;03m# Call predict on the LLM.\u001b[39;00m\n\u001b[1;32m--> 165\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mllm_chain\u001b[38;5;241m.\u001b[39mpredict(callbacks\u001b[38;5;241m=\u001b[39mcallbacks, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39minputs), {}\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\langchain\\chains\\llm.py:252\u001b[0m, in \u001b[0;36mLLMChain.predict\u001b[1;34m(self, callbacks, **kwargs)\u001b[0m\n\u001b[0;32m    237\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mpredict\u001b[39m(\u001b[38;5;28mself\u001b[39m, callbacks: Callbacks \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;28mstr\u001b[39m:\n\u001b[0;32m    238\u001b[0m \u001b[38;5;250m    \u001b[39m\u001b[38;5;124;03m\"\"\"Format prompt with kwargs and pass to LLM.\u001b[39;00m\n\u001b[0;32m    239\u001b[0m \n\u001b[0;32m    240\u001b[0m \u001b[38;5;124;03m    Args:\u001b[39;00m\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    250\u001b[0m \u001b[38;5;124;03m            completion = llm.predict(adjective=\"funny\")\u001b[39;00m\n\u001b[0;32m    251\u001b[0m \u001b[38;5;124;03m    \"\"\"\u001b[39;00m\n\u001b[1;32m--> 252\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcallbacks\u001b[49m\u001b[43m)\u001b[49m[\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39moutput_key]\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\langchain\\chains\\base.py:181\u001b[0m, in \u001b[0;36mChain.__call__\u001b[1;34m(self, inputs, return_only_outputs, callbacks, tags, metadata, include_run_info)\u001b[0m\n\u001b[0;32m    179\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m (\u001b[38;5;167;01mKeyboardInterrupt\u001b[39;00m, \u001b[38;5;167;01mException\u001b[39;00m) \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m    180\u001b[0m     run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n\u001b[1;32m--> 181\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[0;32m    182\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_end(outputs)\n\u001b[0;32m    183\u001b[0m final_outputs: Dict[\u001b[38;5;28mstr\u001b[39m, Any] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mprep_outputs(\n\u001b[0;32m    184\u001b[0m     inputs, outputs, return_only_outputs\n\u001b[0;32m    185\u001b[0m )\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\langchain\\chains\\base.py:175\u001b[0m, in \u001b[0;36mChain.__call__\u001b[1;34m(self, inputs, return_only_outputs, callbacks, tags, metadata, include_run_info)\u001b[0m\n\u001b[0;32m    169\u001b[0m run_manager \u001b[38;5;241m=\u001b[39m callback_manager\u001b[38;5;241m.\u001b[39mon_chain_start(\n\u001b[0;32m    170\u001b[0m     dumpd(\u001b[38;5;28mself\u001b[39m),\n\u001b[0;32m    171\u001b[0m     inputs,\n\u001b[0;32m    172\u001b[0m )\n\u001b[0;32m    173\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m    174\u001b[0m     outputs \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m--> 175\u001b[0m         \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    176\u001b[0m         \u001b[38;5;28;01mif\u001b[39;00m new_arg_supported\n\u001b[0;32m    177\u001b[0m         \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_call(inputs)\n\u001b[0;32m    178\u001b[0m     )\n\u001b[0;32m    179\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m (\u001b[38;5;167;01mKeyboardInterrupt\u001b[39;00m, \u001b[38;5;167;01mException\u001b[39;00m) \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m    180\u001b[0m     run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\langchain\\chains\\llm.py:92\u001b[0m, in \u001b[0;36mLLMChain._call\u001b[1;34m(self, inputs, run_manager)\u001b[0m\n\u001b[0;32m     87\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_call\u001b[39m(\n\u001b[0;32m     88\u001b[0m     \u001b[38;5;28mself\u001b[39m,\n\u001b[0;32m     89\u001b[0m     inputs: Dict[\u001b[38;5;28mstr\u001b[39m, Any],\n\u001b[0;32m     90\u001b[0m     run_manager: Optional[CallbackManagerForChainRun] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[0;32m     91\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Dict[\u001b[38;5;28mstr\u001b[39m, \u001b[38;5;28mstr\u001b[39m]:\n\u001b[1;32m---> 92\u001b[0m     response \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgenerate\u001b[49m\u001b[43m(\u001b[49m\u001b[43m[\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     93\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcreate_outputs(response)[\u001b[38;5;241m0\u001b[39m]\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\langchain\\chains\\llm.py:102\u001b[0m, in \u001b[0;36mLLMChain.generate\u001b[1;34m(self, input_list, run_manager)\u001b[0m\n\u001b[0;32m    100\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Generate LLM result from inputs.\"\"\"\u001b[39;00m\n\u001b[0;32m    101\u001b[0m prompts, stop \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mprep_prompts(input_list, run_manager\u001b[38;5;241m=\u001b[39mrun_manager)\n\u001b[1;32m--> 102\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mllm\u001b[38;5;241m.\u001b[39mgenerate_prompt(\n\u001b[0;32m    103\u001b[0m     prompts,\n\u001b[0;32m    104\u001b[0m     stop,\n\u001b[0;32m    105\u001b[0m     callbacks\u001b[38;5;241m=\u001b[39mrun_manager\u001b[38;5;241m.\u001b[39mget_child() \u001b[38;5;28;01mif\u001b[39;00m run_manager \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[0;32m    106\u001b[0m     \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mllm_kwargs,\n\u001b[0;32m    107\u001b[0m )\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\langchain\\llms\\base.py:141\u001b[0m, in \u001b[0;36mBaseLLM.generate_prompt\u001b[1;34m(self, prompts, stop, callbacks, **kwargs)\u001b[0m\n\u001b[0;32m    133\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mgenerate_prompt\u001b[39m(\n\u001b[0;32m    134\u001b[0m     \u001b[38;5;28mself\u001b[39m,\n\u001b[0;32m    135\u001b[0m     prompts: List[PromptValue],\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    138\u001b[0m     \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[0;32m    139\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m LLMResult:\n\u001b[0;32m    140\u001b[0m     prompt_strings \u001b[38;5;241m=\u001b[39m [p\u001b[38;5;241m.\u001b[39mto_string() \u001b[38;5;28;01mfor\u001b[39;00m p \u001b[38;5;129;01min\u001b[39;00m prompts]\n\u001b[1;32m--> 141\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mgenerate(prompt_strings, stop\u001b[38;5;241m=\u001b[39mstop, callbacks\u001b[38;5;241m=\u001b[39mcallbacks, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\langchain\\llms\\base.py:234\u001b[0m, in \u001b[0;36mBaseLLM.generate\u001b[1;34m(self, prompts, stop, callbacks, tags, metadata, **kwargs)\u001b[0m\n\u001b[0;32m    228\u001b[0m         \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[0;32m    229\u001b[0m             \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAsked to cache, but no cache found at `langchain.cache`.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    230\u001b[0m         )\n\u001b[0;32m    231\u001b[0m     run_managers \u001b[38;5;241m=\u001b[39m callback_manager\u001b[38;5;241m.\u001b[39mon_llm_start(\n\u001b[0;32m    232\u001b[0m         dumpd(\u001b[38;5;28mself\u001b[39m), prompts, invocation_params\u001b[38;5;241m=\u001b[39mparams, options\u001b[38;5;241m=\u001b[39moptions\n\u001b[0;32m    233\u001b[0m     )\n\u001b[1;32m--> 234\u001b[0m     output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_generate_helper(\n\u001b[0;32m    235\u001b[0m         prompts, stop, run_managers, \u001b[38;5;28mbool\u001b[39m(new_arg_supported), \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs\n\u001b[0;32m    236\u001b[0m     )\n\u001b[0;32m    237\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m output\n\u001b[0;32m    238\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(missing_prompts) \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m:\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\langchain\\llms\\base.py:178\u001b[0m, in \u001b[0;36mBaseLLM._generate_helper\u001b[1;34m(self, prompts, stop, run_managers, new_arg_supported, **kwargs)\u001b[0m\n\u001b[0;32m    176\u001b[0m     \u001b[38;5;28;01mfor\u001b[39;00m run_manager \u001b[38;5;129;01min\u001b[39;00m run_managers:\n\u001b[0;32m    177\u001b[0m         run_manager\u001b[38;5;241m.\u001b[39mon_llm_error(e)\n\u001b[1;32m--> 178\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[0;32m    179\u001b[0m flattened_outputs \u001b[38;5;241m=\u001b[39m output\u001b[38;5;241m.\u001b[39mflatten()\n\u001b[0;32m    180\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m manager, flattened_output \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mzip\u001b[39m(run_managers, flattened_outputs):\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\langchain\\llms\\base.py:165\u001b[0m, in \u001b[0;36mBaseLLM._generate_helper\u001b[1;34m(self, prompts, stop, run_managers, new_arg_supported, **kwargs)\u001b[0m\n\u001b[0;32m    155\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_generate_helper\u001b[39m(\n\u001b[0;32m    156\u001b[0m     \u001b[38;5;28mself\u001b[39m,\n\u001b[0;32m    157\u001b[0m     prompts: List[\u001b[38;5;28mstr\u001b[39m],\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    161\u001b[0m     \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[0;32m    162\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m LLMResult:\n\u001b[0;32m    163\u001b[0m     \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m    164\u001b[0m         output \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m--> 165\u001b[0m             \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_generate(\n\u001b[0;32m    166\u001b[0m                 prompts,\n\u001b[0;32m    167\u001b[0m                 stop\u001b[38;5;241m=\u001b[39mstop,\n\u001b[0;32m    168\u001b[0m                 \u001b[38;5;66;03m# TODO: support multiple run managers\u001b[39;00m\n\u001b[0;32m    169\u001b[0m                 run_manager\u001b[38;5;241m=\u001b[39mrun_managers[\u001b[38;5;241m0\u001b[39m] \u001b[38;5;28;01mif\u001b[39;00m run_managers \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[0;32m    170\u001b[0m                 \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs,\n\u001b[0;32m    171\u001b[0m             )\n\u001b[0;32m    172\u001b[0m             \u001b[38;5;28;01mif\u001b[39;00m new_arg_supported\n\u001b[0;32m    173\u001b[0m             \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_generate(prompts, stop\u001b[38;5;241m=\u001b[39mstop)\n\u001b[0;32m    174\u001b[0m         )\n\u001b[0;32m    175\u001b[0m     \u001b[38;5;28;01mexcept\u001b[39;00m (\u001b[38;5;167;01mKeyboardInterrupt\u001b[39;00m, \u001b[38;5;167;01mException\u001b[39;00m) \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m    176\u001b[0m         \u001b[38;5;28;01mfor\u001b[39;00m run_manager \u001b[38;5;129;01min\u001b[39;00m run_managers:\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\langchain\\llms\\base.py:557\u001b[0m, in \u001b[0;36mLLM._generate\u001b[1;34m(self, prompts, stop, run_manager, **kwargs)\u001b[0m\n\u001b[0;32m    554\u001b[0m new_arg_supported \u001b[38;5;241m=\u001b[39m inspect\u001b[38;5;241m.\u001b[39msignature(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_call)\u001b[38;5;241m.\u001b[39mparameters\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrun_manager\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m    555\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m prompt \u001b[38;5;129;01min\u001b[39;00m prompts:\n\u001b[0;32m    556\u001b[0m     text \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m--> 557\u001b[0m         \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_call(prompt, stop\u001b[38;5;241m=\u001b[39mstop, run_manager\u001b[38;5;241m=\u001b[39mrun_manager, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m    558\u001b[0m         \u001b[38;5;28;01mif\u001b[39;00m new_arg_supported\n\u001b[0;32m    559\u001b[0m         \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_call(prompt, stop\u001b[38;5;241m=\u001b[39mstop, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m    560\u001b[0m     )\n\u001b[0;32m    561\u001b[0m     generations\u001b[38;5;241m.\u001b[39mappend([Generation(text\u001b[38;5;241m=\u001b[39mtext)])\n\u001b[0;32m    562\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m LLMResult(generations\u001b[38;5;241m=\u001b[39mgenerations)\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\langchain\\llms\\ctransformers.py:102\u001b[0m, in \u001b[0;36mCTransformers._call\u001b[1;34m(self, prompt, stop, run_manager, **kwargs)\u001b[0m\n\u001b[0;32m    100\u001b[0m text \u001b[38;5;241m=\u001b[39m []\n\u001b[0;32m    101\u001b[0m _run_manager \u001b[38;5;241m=\u001b[39m run_manager \u001b[38;5;129;01mor\u001b[39;00m CallbackManagerForLLMRun\u001b[38;5;241m.\u001b[39mget_noop_manager()\n\u001b[1;32m--> 102\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m chunk \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mclient(prompt, stop\u001b[38;5;241m=\u001b[39mstop, stream\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m):\n\u001b[0;32m    103\u001b[0m     text\u001b[38;5;241m.\u001b[39mappend(chunk)\n\u001b[0;32m    104\u001b[0m     _run_manager\u001b[38;5;241m.\u001b[39mon_llm_new_token(chunk, verbose\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mverbose)\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\ctransformers\\llm.py:432\u001b[0m, in \u001b[0;36mLLM._stream\u001b[1;34m(self, prompt, max_new_tokens, top_k, top_p, temperature, repetition_penalty, last_n_tokens, seed, batch_size, threads, stop, reset)\u001b[0m\n\u001b[0;32m    430\u001b[0m count \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0\u001b[39m\n\u001b[0;32m    431\u001b[0m text \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m--> 432\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m token \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mgenerate(\n\u001b[0;32m    433\u001b[0m     tokens,\n\u001b[0;32m    434\u001b[0m     top_k\u001b[38;5;241m=\u001b[39mtop_k,\n\u001b[0;32m    435\u001b[0m     top_p\u001b[38;5;241m=\u001b[39mtop_p,\n\u001b[0;32m    436\u001b[0m     temperature\u001b[38;5;241m=\u001b[39mtemperature,\n\u001b[0;32m    437\u001b[0m     repetition_penalty\u001b[38;5;241m=\u001b[39mrepetition_penalty,\n\u001b[0;32m    438\u001b[0m     last_n_tokens\u001b[38;5;241m=\u001b[39mlast_n_tokens,\n\u001b[0;32m    439\u001b[0m     seed\u001b[38;5;241m=\u001b[39mseed,\n\u001b[0;32m    440\u001b[0m     batch_size\u001b[38;5;241m=\u001b[39mbatch_size,\n\u001b[0;32m    441\u001b[0m     threads\u001b[38;5;241m=\u001b[39mthreads,\n\u001b[0;32m    442\u001b[0m     reset\u001b[38;5;241m=\u001b[39mreset,\n\u001b[0;32m    443\u001b[0m ):\n\u001b[0;32m    444\u001b[0m     text \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdetokenize([token])\n\u001b[0;32m    446\u001b[0m     \u001b[38;5;66;03m# https://github.com/abetlen/llama-cpp-python/blob/1a13d76c487df1c8560132d10bda62d6e2f4fa93/llama_cpp/llama.py#L686-L706\u001b[39;00m\n\u001b[0;32m    447\u001b[0m     \u001b[38;5;66;03m# Check if one of the stop sequences is part of the text.\u001b[39;00m\n\u001b[0;32m    448\u001b[0m     \u001b[38;5;66;03m# Note that the stop sequence may not always be at the end of text.\u001b[39;00m\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\ctransformers\\llm.py:390\u001b[0m, in \u001b[0;36mLLM.generate\u001b[1;34m(self, tokens, top_k, top_p, temperature, repetition_penalty, last_n_tokens, seed, batch_size, threads, reset)\u001b[0m\n\u001b[0;32m    387\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m reset:\n\u001b[0;32m    388\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mreset()\n\u001b[1;32m--> 390\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43meval\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtokens\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbatch_size\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mbatch_size\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mthreads\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mthreads\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    391\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[0;32m    392\u001b[0m     token \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msample(\n\u001b[0;32m    393\u001b[0m         top_k\u001b[38;5;241m=\u001b[39mtop_k,\n\u001b[0;32m    394\u001b[0m         top_p\u001b[38;5;241m=\u001b[39mtop_p,\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    398\u001b[0m         seed\u001b[38;5;241m=\u001b[39mseed,\n\u001b[0;32m    399\u001b[0m     )\n",
-      "File \u001b[1;32mc:\\Users\\Kalyan\\Desktop\\MedicalBot\\medichat\\lib\\site-packages\\ctransformers\\llm.py:307\u001b[0m, in \u001b[0;36mLLM.eval\u001b[1;34m(self, tokens, batch_size, threads)\u001b[0m\n\u001b[0;32m    305\u001b[0m n_tokens \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlen\u001b[39m(tokens)\n\u001b[0;32m    306\u001b[0m tokens \u001b[38;5;241m=\u001b[39m (c_int \u001b[38;5;241m*\u001b[39m n_tokens)(\u001b[38;5;241m*\u001b[39mtokens)\n\u001b[1;32m--> 307\u001b[0m status \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mctransformers_llm_batch_eval\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    308\u001b[0m \u001b[43m    \u001b[49m\u001b[43mtokens\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    309\u001b[0m \u001b[43m    \u001b[49m\u001b[43mn_tokens\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    310\u001b[0m \u001b[43m    \u001b[49m\u001b[43mbatch_size\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    311\u001b[0m \u001b[43m    \u001b[49m\u001b[43mthreads\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    312\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    313\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m status:\n\u001b[0;32m    314\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFailed to evaluate tokens.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
-      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
-     ]
-    }
-   ],
-   "source": [
-    "while True:\n",
-    "    user_input=input(f\"Input Prompt:\")\n",
-    "    result=qa({\"query\": user_input})\n",
-    "    print(\"Response : \", result[\"result\"])"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.11.5"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
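
The deleted notebook walked through the full RAG pipeline: PDFs loaded and split into 500-character chunks, embedded with MiniLM, indexed in Pinecone under "medchat", then queried through RetrievalQA backed by a local quantized Llama 2. Since the patch removes it, a condensed sketch of that flow for reference, using the same (now-legacy) langchain and pinecone-client APIs the notebook used; paths, index name, and parameters are taken from the notebook, and this mirrors removed code rather than anything the patch adds:

```python
import os
import pinecone
from langchain.chains import RetrievalQA
from langchain.document_loaders import DirectoryLoader, PyPDFLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import CTransformers
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Pinecone

pinecone.init(api_key=os.environ["PINECONE_API_KEY"], environment="gcp-starter")

# Load and chunk the PDFs exactly as the notebook did.
documents = DirectoryLoader("data/", glob="*.pdf", loader_cls=PyPDFLoader).load()
chunks = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=20).split_documents(documents)

# MiniLM embeddings stored in the Pinecone index named "medchat".
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
docsearch = Pinecone.from_texts([c.page_content for c in chunks], embeddings, index_name="medchat")

prompt = PromptTemplate(
    template=(
        "Use the following pieces of information to answer the user's question.\n"
        "If you don't know the answer, just say that you don't know.\n\n"
        "Context: {context}\nQuestion: {question}\n\nHelpful answer:\n"
    ),
    input_variables=["context", "question"],
)

llm = CTransformers(
    model="model/llama-2-7b-chat.ggmlv3.q4_0.bin",  # file named in Instructions.txt
    model_type="llama",
    config={"max_new_tokens": 512, "temperature": 0.8},
)

qa = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=docsearch.as_retriever(search_kwargs={"k": 2}),
    return_source_documents=True,
    chain_type_kwargs={"prompt": prompt},
)

print(qa({"query": "What are Allergies"})["result"])
```
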
diff --git a/src/gen_ai/README.md b/src/gen_ai/README.md
deleted file mode 100644
index 92268ea..0000000
--- a/src/gen_ai/README.md
+++ /dev/null
@@ -1,114 +0,0 @@
-## About The Project
-
-This project leverages natural language processing and information retrieval techniques to create an interactive system for answering user queries based on a collection of PDF documents. The process begins with loading and segmenting PDFs into smaller text chunks. These chunks are then embedded using a pre-trained Hugging Face model. The embeddings are indexed using Pinecone, a vector search engine, facilitating efficient similarity searches. User queries are processed using a retrieval question-answering (QA) system, which combines the Pinecone index, a language model loaded from a file, and a defined prompt template. The project aims to provide concise and accurate responses to user queries, fostering a seamless interaction between the user and the information stored in the PDF documents.
-
-## Built With
-
-- Python
-- LangChain
-- Flask
-- Meta Llama2
-- Pinecone
-
-## Getting Started
-
-The following instructions will help you set up the project locally.
-To get a local copy up and running, follow these steps.
-
-## Installation Steps
-
-### Option 1: Installation from GitHub
-
-Follow these steps to install and set up the project directly from the GitHub repository:
-
-1. **Clone the Repository**
-   - Open your terminal or command prompt.
-   - Navigate to the directory where you want to install the project.
-   - Run the following command to clone the GitHub repository:
-     ```
-     git clone https://github.com/KalyanMurapaka45/Medical-Chatbot-using-Llama-2.git
-     ```
-
-2. **Create a Virtual Environment** (Optional but recommended)
-   - It's a good practice to create a virtual environment to manage project dependencies. Run the following command:
-     ```
-     conda create -p <Environment_Name> python==<python version> -y
-     ```
-
-3. **Activate the Virtual Environment** (Optional)
-   - Activate the virtual environment based on your operating system:
-       ```
-       conda activate <Environment_Name>
-       ```
-
-4. **Install Dependencies**
-   - Navigate to the project directory:
-     ```
-     cd Medical-Chatbot-using-Llama-2
-     ```
-   - Run the following command to install project dependencies:
-     ```
-     pip install -r requirements.txt
-     ```
-
-5. **Run the Project**
-   - Start the Flask application:
-     ```
-     python app.py
-     ```
-
-6. **Access the Project**
-   - Open a web browser and go to `http://localhost:8080` (the host and port set in `app.py`).
-
-
-## API Key Setup
-
-
-### Create a `.env` file in the root directory and add your Pinecone credentials as follows:
-
-```ini
-PINECONE_API_KEY = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
-PINECONE_API_ENV = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
-```
-
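-These values are read at startup with `python-dotenv`, roughly as follows (a short excerpt for orientation; the full logic is in `app.py`):
-
-```python
-import os
-from dotenv import load_dotenv
-
-load_dotenv()  # reads .env from the project root
-PINECONE_API_KEY = os.environ.get('PINECONE_API_KEY')
-PINECONE_API_ENV = os.environ.get('PINECONE_API_ENV')
-```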
-
-### Download the quantized model from the link below and place it in the `model` directory:
-
-```
-Model: llama-2-7b-chat.ggmlv3.q4_0.bin
-URL: https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/tree/main
-```
-
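-Once downloaded, the model is loaded through LangChain's `CTransformers` wrapper; the sketch below mirrors `app.py`, including the project's default generation settings:
-
-```python
-from langchain.llms import CTransformers
-
-llm = CTransformers(model="model/llama-2-7b-chat.ggmlv3.q4_0.bin",
-                    model_type="llama",
-                    config={'max_new_tokens': 512,  # cap on generated tokens
-                            'temperature': 0.8})    # sampling temperature
-```
-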
-## Contributing
-
-Contributions are what make the open-source community such an amazing place to learn, inspire, and create. Any contributions you make are **greatly appreciated**.
-
-• **Report bugs**: If you encounter any bugs, open an issue describing the problem so we can look into it.
-
-• **Contribute code**: If you are a developer and want to contribute, follow the instructions below to get started!
-
-1. Fork the Project
-2. Create your Feature Branch
-3. Commit your Changes
-4. Push to the Branch
-5. Open a Pull Request
-
-• **Suggestions**: If you don't want to code but have some awesome ideas, open up an issue explaining some updates or improvements you would like to see!
-
-#### Don't forget to give the project a star! Thanks again!
-
-## License
-
-This project is licensed under the [OSI](https://opensource.org/)-approved GNU General Public License v3.0 - see the [LICENSE.txt](LICENSE.txt) file for details.<br>
-
-
-## Contact Details
-
-Hema Kalyan Murapaka - [kalyanmurapaka274@gmail.com](mailto:kalyanmurapaka274@gmail.com)<br>
-
-
-## Acknowledgements
-
-We'd like to extend our gratitude to all individuals and organizations who have played a role in the development and success of this project. Your support, whether through contributions, inspiration, or encouragement, has been invaluable. Thank you for being a part of our journey.
diff --git a/src/gen_ai/app.py b/src/gen_ai/app.py
deleted file mode 100644
index a2ff0fd..0000000
--- a/src/gen_ai/app.py
+++ /dev/null
@@ -1,69 +0,0 @@
-from flask import Flask, render_template, jsonify, request
-from src.helper import download_hugging_face_embeddings
-from langchain.vectorstores import Pinecone
-import pinecone
-from langchain.prompts import PromptTemplate
-from langchain.llms import CTransformers
-from langchain.chains import RetrievalQA
-from dotenv import load_dotenv
-from src.prompt import *
-import os
-
-app = Flask(__name__)
-
-load_dotenv()
-
-PINECONE_API_KEY = os.environ.get('PINECONE_API_KEY')
-PINECONE_API_ENV = os.environ.get('PINECONE_API_ENV')
-
-
-embeddings = download_hugging_face_embeddings()
-
-#Initializing the Pinecone
-pinecone.init(api_key=PINECONE_API_KEY,
-              environment=PINECONE_API_ENV)
-
-index_name="medchat"
-
-#Loading the index
-docsearch=Pinecone.from_existing_index(index_name, embeddings)
-
-
-PROMPT=PromptTemplate(template=prompt_template, input_variables=["context", "question"])
-
-chain_type_kwargs={"prompt": PROMPT}
-
-llm=CTransformers(model="model/llama-2-7b-chat.ggmlv3.q4_0.bin",
-                  model_type="llama",
-                  config={'max_new_tokens':512,
-                          'temperature':0.8})
-
-
-qa=RetrievalQA.from_chain_type(
-    llm=llm, 
-    chain_type="stuff", 
-    retriever=docsearch.as_retriever(search_kwargs={'k': 2}),
-    return_source_documents=True, 
-    chain_type_kwargs=chain_type_kwargs)
-
-
-
-@app.route("/")
-def index():
-    return render_template('chat.html')
-
-
-
-@app.route("/get", methods=["GET", "POST"])
-def chat():
-    msg = request.form["msg"]          # user message from the chat form
-    print(msg)
-    result = qa({"query": msg})        # run the retrieval-QA chain
-    print("Response : ", result["result"])
-    return str(result["result"])
-
-
-
-if __name__ == '__main__':
-    app.run(host="0.0.0.0", port= 8080, debug= True)
\ No newline at end of file
diff --git a/src/gen_ai/prompts.py b/src/gen_ai/prompts.py
deleted file mode 100644
index 9a01b0f..0000000
--- a/src/gen_ai/prompts.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import os
-import google.generativeai as genai
-
-genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
-text_model = genai.GenerativeModel('gemini-pro') 
-
-def brain_tumour1(disease, tumor_type):
-    # disease and tumor_type are kept for a uniform call signature; the tumour type is fixed below.
-    prompt = "Give me information about the brain disease for a patient suffering from a glioma tumour in the following paragraph format:\
-        Disease Name: \
-        Disease Description:\
-        Disease Symptoms:\
-        Disease Treatment:\
-        Disease Food to Eat:\
-        Disease Food to Avoid:"
-    answer = text_model.generate_content(prompt)
-    ans = answer.text.replace('*', '\n')  # the response exposes the generated text via .text
-    return ans
-
-def brain_tumour2(disease, tumor_type):
-    # Same as brain_tumour1, for a meningioma tumour.
-    prompt = "Give me information about the brain disease for a patient suffering from a meningioma tumour in the following paragraph format:\
-        Disease Name: \
-        Disease Description:\
-        Disease Symptoms:\
-        Disease Treatment:\
-        Disease Food to Eat:\
-        Disease Food to Avoid:"
-    answer = text_model.generate_content(prompt)
-    ans = answer.text.replace('*', '\n')
-    return ans
-
-def brain_tumour3(disease, tumor_type):
-    # Same as brain_tumour1, for a pituitary tumour.
-    prompt = "Give me information about the brain disease for a patient suffering from a pituitary tumour in the following paragraph format:\
-        Disease Name: \
-        Disease Description:\
-        Disease Symptoms:\
-        Disease Treatment:\
-        Disease Food to Eat:\
-        Disease Food to Avoid:"
-    answer = text_model.generate_content(prompt)
-    ans = answer.text.replace('*', '\n')
-    return ans
\ No newline at end of file
diff --git a/src/gen_ai/requirements.txt b/src/gen_ai/requirements.txt
deleted file mode 100644
index e34a9cd..0000000
--- a/src/gen_ai/requirements.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-ctransformers==0.2.5
-sentence-transformers==2.2.2
-pinecone-client
-langchain==0.0.225
-flask
-pypdf
-python-dotenv
--e .
diff --git a/src/gen_ai/setup.py b/src/gen_ai/setup.py
deleted file mode 100644
index abfc330..0000000
--- a/src/gen_ai/setup.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from setuptools import find_packages, setup
-
-setup(
-    name='Medical Chatbot',
-    version='0.0.0',
-    author='Hema Kalyan Murapaka',
-    author_email='kalyanmurapaka274@gmail.com',
-    packages=find_packages(),
-    install_requires=[]
-)
\ No newline at end of file
diff --git a/src/gen_ai/src/__init__.py b/src/gen_ai/src/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/gen_ai/src/helper.py b/src/gen_ai/src/helper.py
deleted file mode 100644
index 7a975c9..0000000
--- a/src/gen_ai/src/helper.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from langchain.document_loaders import PyPDFLoader, DirectoryLoader
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.embeddings import HuggingFaceEmbeddings
-
-
-#Extract data from the PDF
-def load_pdf(data):
-    loader = DirectoryLoader(data, glob="*.pdf", loader_cls=PyPDFLoader)
-    documents = loader.load()
-    return documents
-
-#Create text chunks
-def text_split(extracted_data):
-    text_splitter = RecursiveCharacterTextSplitter(chunk_size = 500, chunk_overlap = 20)
-    text_chunks = text_splitter.split_documents(extracted_data)
-    return text_chunks
-
-#download embedding model
-def download_hugging_face_embeddings():
-    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
-    return embeddings
\ No newline at end of file
diff --git a/src/gen_ai/src/prompt.py b/src/gen_ai/src/prompt.py
deleted file mode 100644
index 685ce9f..0000000
--- a/src/gen_ai/src/prompt.py
+++ /dev/null
@@ -1,10 +0,0 @@
-prompt_template="""
-Use the following pieces of information to answer the user's question.
-If you don't know the answer, just say that you don't know, don't try to make up an answer.
-
-Context: {context}
-Question: {question}
-
-Only return the helpful answer below and nothing else.
-Helpful answer:
-"""
\ No newline at end of file
diff --git a/src/gen_ai/store_index.py b/src/gen_ai/store_index.py
deleted file mode 100644
index b741a04..0000000
--- a/src/gen_ai/store_index.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from src.helper import load_pdf, text_split, download_hugging_face_embeddings
-from langchain.vectorstores import Pinecone
-import pinecone
-from dotenv import load_dotenv
-import os
-
-load_dotenv()
-
-PINECONE_API_KEY = os.environ.get('PINECONE_API_KEY')
-PINECONE_API_ENV = os.environ.get('PINECONE_API_ENV')
-
-# print(PINECONE_API_KEY)
-# print(PINECONE_API_ENV)
-
-extracted_data = load_pdf("data/")
-text_chunks = text_split(extracted_data)
-embeddings = download_hugging_face_embeddings()
-
-
-#Initializing the Pinecone
-pinecone.init(api_key=PINECONE_API_KEY,
-              environment=PINECONE_API_ENV)
-
-
-index_name="medchat"
-
-#Creating Embeddings for Each of The Text Chunks & storing
-docsearch=Pinecone.from_texts([t.page_content for t in text_chunks], embeddings, index_name=index_name)
\ No newline at end of file
diff --git a/src/gen_ai/template.py b/src/gen_ai/template.py
deleted file mode 100644
index d985198..0000000
--- a/src/gen_ai/template.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import os
-from pathlib import Path
-import logging
-
-logging.basicConfig(level=logging.INFO, format='[%(asctime)s]: %(message)s:')
-
-
-list_of_files = [
-    "src/__init__.py",
-    "src/helper.py",
-    "src/prompt.py",
-    ".env",
-    "setup.py",
-    "NoteBook_Experiments/Researc.ipynb",
-    "app.py",
-    "store_index.py",
-    "static/styles.css",
-    "templates/index.html"]
-
-
-for filepath in list_of_files:
-   filepath = Path(filepath)
-   filedir, filename = os.path.split(filepath)
-
-   if filedir !="":
-      os.makedirs(filedir, exist_ok=True)
-      logging.info(f"Creating directory; {filedir} for the file {filename}")
-
-   if (not os.path.exists(filepath)) or (os.path.getsize(filepath) == 0):
-      with open(filepath, 'w'):
-         pass
-      logging.info(f"Creating empty file: {filepath}")
-
-   else:
-      logging.info(f"{filename} is already created")
\ No newline at end of file