Created Input Fields for Resume and Job Description #262

Open · wants to merge 1 commit into base: main
Binary file removed Data/JobDescription/job_desc_front_end_engineer.pdf
Binary file removed Data/JobDescription/job_desc_full_stack_engineer.pdf
Binary file removed Data/JobDescription/job_desc_java_developer.pdf
Binary file removed Data/JobDescription/job_desc_product_manager.pdf
Binary file added Data/JobDescription/servicenow job1.pdf
Nine more files deleted (file contents not shown in the diff).
Binary file added Data/Resumes/SophiyaSinghResumeJan24.pdf
Binary file removed Data/Resumes/alfred_pennyworth_pm.pdf
Binary file removed Data/Resumes/barry_allen_fe.pdf
Binary file removed Data/Resumes/bruce_wayne_fullstack.pdf
Binary file removed Data/Resumes/harvey_dent_mle.pdf
Binary file removed Data/Resumes/john_doe.pdf
README.md: 2 changes (1 addition, 1 deletion)
@@ -158,7 +158,7 @@ Follow these steps to set up the environment and run the application.
8. Run the Application:

```python
-streamlit run streamlit_app.py
+streamlit run starter.py
```

**Note**: For local versions, you do not need to run "streamlit_second.py" as it is specifically for deploying to Streamlit servers.
run_first.py: 92 changes (48 additions, 44 deletions)
@@ -4,6 +4,7 @@

from scripts import JobDescriptionProcessor, ResumeProcessor
from scripts.utils import get_filenames_from_dir, init_logging_config
+import streamlit_app

init_logging_config()

@@ -30,48 +31,51 @@ def remove_old_files(files_path):

logging.info("Deleted old files from " + files_path)

+def processing_function():
+    logging.info("Started to read from Data/Resumes")
+    try:
+        # Check if there are resumes present or not.
+        # If present then parse them.
+        remove_old_files(PROCESSED_RESUMES_PATH)
+
+        file_names = get_filenames_from_dir("Data/Resumes")
+        logging.info("Reading from Data/Resumes is now complete.")
+    except:
+        # Exit the program if there are no resumes.
+        logging.error("There are no resumes present in the specified folder.")
+        logging.error("Exiting from the program.")
+        logging.error("Please add resumes in the Data/Resumes folder and try again.")
+        exit(1)
+
+    # Now that the file names are known, parse the resumes into JSON format.
+    logging.info("Started parsing the resumes.")
+    for file in file_names:
+        processor = ResumeProcessor(file)
+        success = processor.process()
+    logging.info("Parsing of the resumes is now complete.")
+
+    logging.info("Started to read from Data/JobDescription")
+    try:
+        # Check if there are job descriptions present or not.
+        # If present then parse them.
+        remove_old_files(PROCESSED_JOB_DESCRIPTIONS_PATH)
+
+        file_names = get_filenames_from_dir("Data/JobDescription")
+        logging.info("Reading from Data/JobDescription is now complete.")
+    except:
+        # Exit the program if there are no job descriptions.
+        logging.error("There are no job descriptions present in the specified folder.")
+        logging.error("Exiting from the program.")
+        logging.error("Please add job descriptions in the Data/JobDescription folder and try again.")
+        exit(1)
+
+    # Now that the file names are known, parse the job descriptions into JSON format.
+    logging.info("Started parsing the Job Descriptions.")
+    for file in file_names:
+        processor = JobDescriptionProcessor(file)
+        success = processor.process()
+    logging.info("Parsing of the Job Descriptions is now complete.")
+    logging.info("Success! Now run `streamlit run streamlit_second.py`")
+    streamlit_app.result_function()


logging.info("Started to read from Data/Resumes")
try:
# Check if there are resumes present or not.
# If present then parse it.
remove_old_files(PROCESSED_RESUMES_PATH)

file_names = get_filenames_from_dir("Data/Resumes")
logging.info("Reading from Data/Resumes is now complete.")
except:
# Exit the program if there are no resumes.
logging.error("There are no resumes present in the specified folder.")
logging.error("Exiting from the program.")
logging.error("Please add resumes in the Data/Resumes folder and try again.")
exit(1)

# Now after getting the file_names parse the resumes into a JSON Format.
logging.info("Started parsing the resumes.")
for file in file_names:
processor = ResumeProcessor(file)
success = processor.process()
logging.info("Parsing of the resumes is now complete.")

logging.info("Started to read from Data/JobDescription")
try:
# Check if there are resumes present or not.
# If present then parse it.
remove_old_files(PROCESSED_JOB_DESCRIPTIONS_PATH)

file_names = get_filenames_from_dir("Data/JobDescription")
logging.info("Reading from Data/JobDescription is now complete.")
except:
# Exit the program if there are no resumes.
logging.error("There are no job-description present in the specified folder.")
logging.error("Exiting from the program.")
logging.error("Please add resumes in the Data/JobDescription folder and try again.")
exit(1)

# Now after getting the file_names parse the resumes into a JSON Format.
logging.info("Started parsing the Job Descriptions.")
for file in file_names:
processor = JobDescriptionProcessor(file)
success = processor.process()
logging.info("Parsing of the Job Descriptions is now complete.")
logging.info("Success now run `streamlit run streamlit_second.py`")
starter.py (new file): 94 changes (94 additions, 0 deletions)
@@ -0,0 +1,94 @@
import json
import os
from typing import List
import subprocess

import networkx as nx
import nltk
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import streamlit as st
from annotated_text import annotated_text, parameters
from streamlit_extras import add_vertical_space as avs
from streamlit_extras.badges import badge

from scripts.similarity.get_score import *
from scripts.utils import get_filenames_from_dir
from scripts.utils.logger import init_logging_config
import run_first

# Set page configuration
st.set_page_config(
    page_title="Resume Matcher",
    page_icon="Assets/img/favicon.ico",
    initial_sidebar_state="auto",
)

init_logging_config()
cwd = find_path("Resume-Matcher")
config_path = os.path.join(cwd, "scripts", "similarity")

try:
    nltk.data.find("tokenizers/punkt")
except LookupError:
    nltk.download("punkt")

parameters.SHOW_LABEL_SEPARATOR = False
parameters.BORDER_RADIUS = 3
parameters.PADDING = "0.5 0.25rem"

# Display the main title and subheaders
st.title(":blue[Resume Matcher]")
with st.sidebar:
    st.image("Assets/img/header_image.png")
    st.subheader(
        "Free and Open Source ATS to help your resume pass the screening stage."
    )
    st.markdown(
        "Create Your ATS-friendly Resume: [www.atsresume.app](https://atsresume.vercel.app/)"
    )


st.divider()
avs.add_vertical_space(1)

# Input fields for the resume and the job description
uploaded_resume = st.file_uploader("Upload your Resume", type=["pdf"])
if uploaded_resume is not None:
    # Save the uploaded resume where run_first.py expects to find it.
    res_name = uploaded_resume.name
    res_save_path = os.path.join("Data/Resumes/", res_name)
    with open(res_save_path, "wb") as f:
        f.write(uploaded_resume.getvalue())
    st.write("Resume saved successfully!")
else:
    st.write("Upload your Resume to continue")

uploaded_jd = st.file_uploader("Choose a Job Description", type=["pdf"])
if uploaded_jd is not None:
    # Save the uploaded job description where run_first.py expects to find it.
    jd_name = uploaded_jd.name
    jd_save_path = os.path.join("Data/JobDescription/", jd_name)
    with open(jd_save_path, "wb") as f:
        f.write(uploaded_jd.getvalue())
    st.write("Job Description saved successfully!")
else:
    st.write("Upload your Job Description to continue")


# Once both PDFs have been saved, run the parsing pipeline in-process
# instead of spawning a separate Python interpreter.
script_path = "run_first.py"
if uploaded_resume is not None and uploaded_jd is not None:
    # Previously launched as: subprocess.run(["python", script_path])
    run_first.processing_function()
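One caveat visible in the run_first.py diff above: processing_function() calls exit(1) when it cannot read an input folder, which raises SystemExit inside the running Streamlit app. A more defensive version of this trigger block (a sketch under that assumption, not part of the PR) could check the Data/ folders first and surface failures in the UI:

```python
# Sketch only: reuses names already present in starter.py (st, run_first,
# get_filenames_from_dir, uploaded_resume, uploaded_jd); the checks and error
# handling shown here are illustrative, not part of this PR.
if uploaded_resume is not None and uploaded_jd is not None:
    try:
        # Confirm the saved PDFs are actually visible on disk before parsing.
        if get_filenames_from_dir("Data/Resumes") and get_filenames_from_dir("Data/JobDescription"):
            run_first.processing_function()
        else:
            st.error("Saved files were not found under Data/; check write permissions.")
    except SystemExit:
        # exit(1) inside processing_function() raises SystemExit; keep the page alive.
        st.error("Processing aborted: one of the input folders could not be read.")
```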
