diff --git a/Application/color_ball_tracker.py b/Application/color_ball_tracker.py
index 4957bb3..66bc630 100644
--- a/Application/color_ball_tracker.py
+++ b/Application/color_ball_tracker.py
@@ -1,271 +1,586 @@
-### Copyright Michael@bots4all
-#%% Load modules
-from IPython import get_ipython
-import numpy as np
-import cv2 as cv
-from urllib.request import urlopen
-import socket
-import sys
-import json
-import re
-import time
-import imutils
-
-#%% Clear working space
-# get_ipython().magic('clear')
-# get_ipython().magic('reset -f')
-
-#%% Capture image from camera
-cv.namedWindow('Camera')
-cv.moveWindow('Camera', 0, 0)
-cmd_no = 0
-def capture():
+"""
+Color Ball Tracker,
+Created by: Michael Bozall,
+Date: 2021-08-25,
+Modified by: Rodrigo Barba,
+Date: 2024-11-14,
+Description: This script captures an image from a camera, filters it for a specified color, detects contours, and calculates the distance and angle to a detected object. It then sends commands to a robot to locate and track the object while avoiding obstacles.
+"""
+
+
+
+
+# Load modules
+import re # Regular expressions
+import sys # System-specific parameters and functions
+import json # JSON parsing and manipulation
+import time # Time-related functions
+import socket # Networking support
+import imutils # Additional OpenCV utilities
+import threading # Thread-based parallelism
+import cv2 as cv # OpenCV for computer vision tasks
+import numpy as np # Numerical operations with arrays
+from urllib.request import urlopen # To fetch images from a URL
+from flask_socketio import SocketIO, emit # Socket communication for web interface
+from flask import Flask, Response, render_template # Web server and template rendering
+
+
+
+import os
+os.environ["QT_QPA_PLATFORM"] = "xcb"  # Force Qt to use the X11 (xcb) backend for OpenCV windows
+
+# Define HSV color ranges for filtering; "red" wraps around hue 0, so it needs two ranges
+color_ranges = {
+ "green": (np.array([50, 70, 60], dtype="uint8"), np.array([90, 255, 255], dtype="uint8")),
+ "blue": (np.array([100, 150, 0], dtype="uint8"), np.array([140, 255, 255], dtype="uint8")),
+ "red": (np.array([0, 150, 100], dtype="uint8"), np.array([10, 255, 255], dtype="uint8")),
+ "red2": (np.array([170, 150, 100], dtype="uint8"), np.array([180, 255, 255], dtype="uint8"))
+}
+
+# Capture image from camera
+# cv.namedWindow('Camera') # Create a named window for displaying the camera feed
+# cv.moveWindow('Camera', 0, 0) # Position the window at the top-left corner of the screen
+
+# Flask setup
+app = Flask(__name__)
+
+# Initialize SocketIO
+socketio = SocketIO(app, cors_allowed_origins="*")
+
+# Shared variable to hold the captured image
+current_frame = None
+
+cmd_no = 0 # Initialize the command number counter
+
+# Function to switch between colors
+def switch_color(color="blue") -> tuple:
+ """
+ Switches to the desired color HSV range for detection.
+ Accepts "green", "blue", or "red" as input.
+ """
+ if color == "green":
+ return color_ranges["green"]
+ elif color == "blue":
+ return color_ranges["blue"]
+ elif color == "red":
+ return color_ranges["red"]
+ elif color == "red2": # for the second red range due to HSV wraparound
+ return color_ranges["red2"]
+ else:
+ print("Invalid color. Defaulting to green.")
+ return color_ranges["green"]
+
+def show():
+ """
+ Captures an image from the camera and stores it in the global variable.
+ This function runs in a separate thread to ensure the image is updated continuously.
+ """
+ global current_frame
+ while True:
+ # Capture the image from the car's camera
+        img = capture_image()  # Fetch a frame with the guide lines drawn
+        current_frame = img  # Store the image in the shared variable
+        time.sleep(0.1)  # Throttle the capture loop
+
+def capture_image(yh=491):
+    """
+    Fetch a frame from the car's camera, draw the guide lines on it, and return it.
+    yh is the y-coordinate of the horizon line, measured from the image bottom.
+    """
global cmd_no
- cmd_no += 1
- print(str(cmd_no) + ': capture image', end = ': ')
cam = urlopen('http://192.168.4.1/capture')
img = cam.read()
- img = np.asarray(bytearray(img), dtype = 'uint8')
+ img = np.asarray(bytearray(img), dtype='uint8')
img = cv.imdecode(img, cv.IMREAD_UNCHANGED)
+ cv.line(img, (400, 0), (400, 600), (0, 0, 255), 1) # Vertical center line
+ cv.line(img, (0, 600 - yh), (800, 600 - yh), (0, 0, 255), 1) # Horizon line
+ return img
+
+@app.route('/video_feed')
+def video_feed():
+ """
+ A Flask route to stream the current image to the browser.
+ """
+ def generate():
+ global current_frame
+ while True:
+ if current_frame is not None:
+ # Convert the image to JPEG for streaming
+ ret, jpeg = cv.imencode('.jpg', current_frame)
+ if ret:
+ # Return the image in the appropriate format for Flask streaming
+ yield (b'--frame\r\n'
+ b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n\r\n')
+ time.sleep(0.1) # Sleep to reduce CPU usage
+ return Response(generate(), mimetype='multipart/x-mixed-replace; boundary=frame')
+
+@app.route('/')
+def console_log():
+ return render_template('app_2.html')
+
+# Start the Flask app in a separate thread
+def start_flask():
+ socketio.run(app, host='0.0.0.0', port=5050, allow_unsafe_werkzeug=True)
+
+# Start Flask server in a new thread
+flask_thread = threading.Thread(target=start_flask)
+flask_thread.daemon = True # Daemonize the thread to allow the main program to exit
+flask_thread.start()
+
+# Start the camera capture in a separate thread
+
+capture_thread = threading.Thread(target=show)
+capture_thread.daemon = True # Daemonize the thread to allow the main program to exit
+capture_thread.start()
+
+
+
+
+def capture():
+ """
+ Captures an image from a camera, filters it for a specified color,
+ detects contours, and calculates the distance and angle to a detected object.
+
+ Returns:
+ ball (int): Indicates if a ball is detected (1 if detected, 0 otherwise).
+ dist (float): Calculated distance to the ball.
+ ang_rad (float): Angle to the ball in radians.
+ ang_deg (int): Angle to the ball in degrees.
+ """
+ global cmd_no
+ cmd_no += 1
+ print(str(cmd_no) + ': capture image', end=': ')
+
+    # Select the active color range ('green', 'blue', 'red', or 'red2')
+    lu_color_vision = switch_color('red2')  # Currently tracking the upper red hue band
+
+ # Fetch image from the camera
+ cam = urlopen('http://192.168.4.1/capture') # Open the camera URL
+ img = cam.read() # Read the image bytes
+ img = np.asarray(bytearray(img), dtype='uint8') # Convert bytes to a NumPy array
+ img = cv.imdecode(img, cv.IMREAD_UNCHANGED) # Decode the image
+
# Filter image by color
- mask = cv.medianBlur(img, 5)
- img_hsv = cv.cvtColor(mask, cv.COLOR_BGR2HSV)
- lower = np.array([50, 70, 60], dtype="uint8") # 50, 70, 60
- upper = np.array([90, 255, 255], dtype="uint8") # 90, 200, 230
- mask = cv.inRange(img_hsv, lower, upper)
+ mask = cv.medianBlur(img, 5) # Apply median blur to reduce noise
+ img_hsv = cv.cvtColor(mask, cv.COLOR_BGR2HSV) # Convert the image to HSV color space
+ mask = cv.inRange(img_hsv, lu_color_vision[0], lu_color_vision[1]) # Apply color filter
+
# Detect contours
- mask = cv.erode(mask, None, iterations = 2)
- mask = cv.dilate(mask, None, iterations = 2)
- cont = cv.findContours(mask.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
- cont = imutils.grab_contours(cont)
- # Evaluate all contours
- yh = 491 # y-coordinate of line of horizon, contours above it are ignored
- ball = 0 # flag indicating a presence of a ball of the given color
- dist = None # distance to the ball
- ang_rad = 0 # angle to the ball in rad
- ang_deg = 0 # angle to the ball in deg
- area = 0 # area of contour
- area_max = 20 # contours with area smaller than this will be ignored
- ncont = len(cont)
+ mask = cv.erode(mask, None, iterations=2) # Erode to reduce noise
+ mask = cv.dilate(mask, None, iterations=2) # Dilate to restore object size
+ cont = cv.findContours(mask.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) # Find contours
+ cont = imutils.grab_contours(cont) # Grab the contour list
+
+ # Initialize variables for contour evaluation
+    yh = 491  # Y-coordinate of the horizon line; contours above it are ignored
+ ball = 0 # Flag indicating the presence of a ball
+ dist = None # Distance to the ball
+ ang_rad = 0 # Angle to the ball in radians
+ ang_deg = 0 # Angle to the ball in degrees
+ area = 0 # Contour area
+    area_max = 20  # Running maximum of contour area; starts at the smallest area to consider
+ ncont = len(cont) # Number of contours detected
+
+ # Evaluate contours
if ncont > 0:
for n in range(ncont):
- # Find center and area of contour
- M = cv.moments(cont[n])
- _xc = int(M['m10']/M['m00'])
- _yc = 600 - int(M['m01']/M['m00']) # make y = 0 at image bottom
- area = M['m00']
- # Find ball with largest area below line of horizon
+ M = cv.moments(cont[n]) # Calculate moments of the contour
+ _xc = int(M['m10'] / M['m00']) # X-coordinate of the contour center
+ _yc = 600 - int(M['m01'] / M['m00']) # Adjust Y-coordinate to start at image bottom
+ area = M['m00'] # Contour area
+
+ # Select the largest valid contour below the horizon
if _yc < yh and area > area_max:
area_max = area
- ball = 1
- nc = n
- xc = _xc - 400 # make x axis go through image center
+ ball = 1 # Mark a ball as detected
+ nc = n # Index of the selected contour
+ xc = _xc - 400 # Center x-coordinate relative to the image center
yc = _yc
- center = (_xc, 600 - _yc) # need only for plotting
+ center = (_xc, 600 - _yc) # Center point for visualization
+
# Calculate distance and angle to the ball
if ball:
- cv.drawContours(img, cont, nc, (0,0,255), 1) # draw selected contour
- cv.circle(img, center, 1, (0,0,255), 2) # draw center
- cv.putText(img, '(' + str(xc) + ', ' + str(yc) + ')', center,
- cv.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 1, cv.LINE_AA)
- dy = 4.31*(745.2 + yc)/(yh - yc) # distance to ball along y
- if xc < 0: dy = dy*(1 - xc/1848) # correction factor for negative xc
- dx = 0.00252*xc*dy # distance to ball along x
- dist = np.sqrt(dx**2 + dy**2) # distance to ball
- ang_rad = np.arctan(dx/dy) # angle to ball in rad
- ang_deg = round(ang_rad*180/np.pi) # angle to ball in deg
- print('bd =', round(dist), 'ba =', ang_deg)
+ cv.drawContours(img, cont, nc, (0, 0, 255), 1) # Highlight the selected contour in red
+ cv.circle(img, center, 1, (0, 0, 255), 2) # Mark the center of the ball
+ cv.putText(img, '(' + str(xc) + ', ' + str(yc) + ')', center,
+ cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1, cv.LINE_AA) # Annotate coordinates
+ dy = 4.31 * (745.2 + yc) / (yh - yc) # Calculate distance along the y-axis
+ if xc < 0: dy = dy * (1 - xc / 1848) # Apply correction for negative x-coordinates
+ dx = 0.00252 * xc * dy # Calculate distance along the x-axis
+ dist = np.sqrt(dx**2 + dy**2) # Calculate the total distance
+ ang_rad = np.arctan(dx / dy) # Calculate the angle in radians
+        ang_deg = round(ang_rad * 180 / np.pi)  # Convert the angle to degrees
+        socketio.emit(
+            'console',
+            {
+                'type': 'cmd',
+                'color': '#a1ff0a',
+                'data': f"Ball detected at ({xc}, {yc}) with distance {round(dist)} cm and angle {ang_deg} degrees",
+            }
+        )
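+        # Worked example (illustrative): a ball at (xc, yc) = (100, 300) gives
+        # dy = 4.31 * (745.2 + 300) / (491 - 300) ≈ 23.6 cm and
+        # dx = 0.00252 * 100 * 23.6 ≈ 5.9 cm, so dist ≈ 24.3 cm and ang_deg ≈ 14.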
else:
- print('no ball')
- cv.line(img, (400,0), (400,600), (0,0,255), 1) # center line
- cv.line(img, (0,600 - yh), (800,600 - yh), (0,0,255), 1) # line of horizon
- cv.imshow('Camera', img)
- cv.waitKey(1)
- return ball, dist, ang_rad, ang_deg
-
-#%% Send a command and receive a response
-off = [0.007, 0.022, 0.091, 0.012, -0.011, -0.05]
-def cmd(sock, do, what = '', where = '', at = ''):
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'cmd',
+ 'color': '#ff0000',
+ 'data': f"No ball detected"
+ }
+ )
+
+ # Draw guidelines
+ cv.line(img, (400, 0), (400, 600), (0, 0, 255), 1) # Vertical center line
+ cv.line(img, (0, 600 - yh), (800, 600 - yh), (0, 0, 255), 1) # Horizon line
+
+ # Display the image
+ # cv.imshow('Camera', img)
+ # cv.waitKey(1) # Wait briefly to refresh the display
+
+ return ball, dist, ang_rad, ang_deg # Return detection results
+
+
+
+
+# Send a command and receive a response
+off = [0.007, 0.022, 0.091, 0.012, -0.011, -0.05] # Offsets for sensor calibration
+
+def cmd(sock, do, what='', where='', at=''):
+ """
+ Sends a command to the robot and processes the response.
+
+ Parameters:
+ sock (socket.socket): The socket connection to the robot.
+ do (str): The action to perform (e.g., 'move', 'set', 'stop').
+ what (str): Additional information about the action (e.g., 'distance', 'motion').
+ where (str): Direction for movement (e.g., 'forward', 'back', 'left', 'right').
+ at (varied): Additional data such as speed, angle, or sensor reading configuration.
+
+ Returns:
+ res (int/float/list): The processed response from the robot.
+ """
global cmd_no
- cmd_no += 1
- msg = {"H":str(cmd_no)} # dictionary
+ cmd_no += 1 # Increment the command counter
+ msg = {"H": str(cmd_no)} # Initialize the command message as a dictionary with a header
+
+ # Determine the type of command and construct the message accordingly
if do == 'move':
- msg["N"] = 3
+ msg["N"] = 3 # Command type for movement
what = ' car '
if where == 'forward':
- msg["D1"] = 3
+ msg["D1"] = 3 # Direction forward
elif where == 'back':
- msg["D1"] = 4
+ msg["D1"] = 4 # Direction backward
elif where == 'left':
- msg["D1"] = 1
+ msg["D1"] = 1 # Direction left
elif where == 'right':
- msg["D1"] = 2
- msg["D2"] = at # at is speed here
- where = where + ' '
+ msg["D1"] = 2 # Direction right
+ msg["D2"] = at # 'at' represents speed here
+ where = where + ' ' # Add a space for logging
+
elif do == 'set':
- msg.update({"N":4,"D1":at[0],"D2":at[1]})
+ msg.update({"N": 4, "D1": at[0], "D2": at[1]}) # Set speed using a tuple (at)
what = ' speed '
+
elif do == 'stop':
- msg.update({"N":1,"D1":0,"D2":0,"D3":1})
+ msg.update({"N": 1, "D1": 0, "D2": 0, "D3": 1}) # Stop the robot
what = ' car'
+
elif do == 'rotate':
- msg.update({"N":5,"D1":1,"D2":at}) # at is an angle here
+ msg.update({"N": 5, "D1": 1, "D2": at}) # Rotate the robot's head to an angle (at)
what = ' head'
where = ' '
+
elif do == 'measure':
if what == 'distance':
- msg.update({"N":21,"D1":2})
+ msg.update({"N": 21, "D1": 2}) # Measure distance
elif what == 'motion':
- msg["N"] = 6
+ msg["N"] = 6 # Measure motion
what = ' ' + what
+
elif do == 'check':
- msg["N"] = 23
+ msg["N"] = 23 # Check if the robot is off the ground
what = ' off the ground'
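+
+    # Example wire format (illustrative): a 'move forward at speed 100' command
+    # with header 12 serializes to '{"H": "12", "N": 3, "D1": 3, "D2": 100}'.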
+
+ # Convert the message dictionary to JSON format
msg_json = json.dumps(msg)
- print(str(cmd_no) + ': ' + do + what + where + str(at), end = ': ')
+ print(str(cmd_no) + ': ' + do + what + where + str(at), end=': ')
+
+ # Send the message and handle potential errors
try:
- sock.send(msg_json.encode())
+ sock.send(msg_json.encode()) # Send the JSON message over the socket
except:
- print('Error: ', sys.exc_info()[0])
- sys.exit()
- while 1:
- res = sock.recv(1024).decode()
- if '_' in res:
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'cmd',
+ 'color': '#ff0000',
+ 'data': f"Error: {sys.exc_info()[0]}",
+ }
+ )
+ sys.exit() # Exit the program if an error occurs
+
+ # Wait for a valid response
+ while True:
+ res = sock.recv(1024).decode() # Receive the response
+ if '_' in res: # Check if the response contains the delimiter
break
+
+ # Extract the relevant portion of the response
res = re.search('_(.*)}', res).group(1)
+
+ # Process the response based on the command type
if res == 'ok' or res == 'true':
- res = 1
+ res = 1 # Successful response
elif res == 'false':
- res = 0
+ res = 0 # Negative response
elif msg.get("N") == 5:
- time.sleep(0.5) # give time to rotate head
+ time.sleep(0.5) # Allow time for the head to rotate
elif msg.get("N") == 21:
- res = round(int(res)*1.3, 1) # UM distance with correction factor
+ res = round(int(res) * 1.3, 1) # Correct the distance measurement
elif msg.get("N") == 6:
- res = res.split(",")
- res = [int(x)/16384 for x in res] # convert to units of g
- res[2] = res[2] - 1 # subtract 1G from az
- res = [round(res[i] - off[i], 4) for i in range(6)]
+ res = res.split(",") # Split the motion data into components
+ res = [int(x) / 16384 for x in res] # Convert to units of g
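+        # (assumption: 16384 LSB/g matches the ±2 g full-scale setting of MPU-6050-class IMUs)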
+ res[2] = res[2] - 1 # Subtract 1G from the z-axis measurement
+ res = [round(res[i] - off[i], 4) for i in range(6)] # Apply calibration offsets
else:
- res = int(res)
- print(res)
- return res
-
-#%% Connect to car's WiFi
-ip = "192.168.4.1"
-port = 100
-print('Connect to {0}:{1}'.format(ip, port))
+ res = int(res) # Convert the response to an integer for other cases
+
+ # Log the response
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'cmd',
+ 'color': '#a1ff0a',
+ 'data': f"{cmd_no}: {do} {what} {where} {at}: {res}",
+ }
+ )
+
+ return res # Return the processed response
+
+
+
+
+# Define the IP address and port of the car's WiFi
+ip = "192.168.4.1" # IP address of the car
+port = 100 # Port number for communication
+socketio.emit(
+    'console',
+    {
+        'type': 'action',
+        'color': '#ff8700',
+        'data': f"Connecting to {ip}:{port}...",
+    }
+)
+
+# Create a socket object for the connection
car = socket.socket()
+
+# Try to connect to the car's WiFi
try:
- car.connect((ip, port))
+ car.connect((ip, port)) # Connect to the specified IP and port
except:
- print('Error: ', sys.exc_info()[0])
- sys.exit()
-print('Connected!')
+ # Handle any connection errors
+ print('Error: ', sys.exc_info()[0]) # Print the error message
+ sys.exit() # Exit the program if connection fails
+
+socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#a1ff0a',
+ 'data': f"Connected to {ip}:{port}",
+ }
+)
-#%% Read first data from socket
-print('Receive from {0}:{1}'.format(ip, port))
+# Try to receive initial data from the socket
try:
- data = car.recv(1024).decode()
+ data = car.recv(1024).decode() # Receive up to 1024 bytes and decode the message
except:
- print('Error: ', sys.exc_info()[0])
- sys.exit()
-print('Received: ', data)
-
-#%% Find the ball
-speed = 100 # car speed
-ang_tol = 10 # tolerance for rotation angle
-ang = [90, ang_tol, 180 - ang_tol] # head rotation angles
-dist = [0, 0, 0] # measured distances to obstacles at rotation angles
-dist_min = 30 # min distance to obstacle (cm)
-d180 = 90 # eq rotation distance for 180 deg turn
-dturn = 60 # eq rotation distance for smaller than 180 deg turns
+ # Handle any errors during data reception
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#ff0000',
+ 'data': f"Error: {sys.exc_info()[0]}",
+ }
+ )
+ sys.exit() # Exit the program if data reception fails
+
+socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#a1ff0a',
+ 'data': f"Received: {data}",
+ }
+)
+
+
+
+
+# Define movement and measurement parameters
+speed = 100 # Car speed
+ang_tol = 10 # Tolerance for rotation angle (degrees)
+ang = [90, ang_tol, 180 - ang_tol] # Head rotation angles (center, left, right)
+dist = [0, 0, 0] # Measured distances to obstacles at the defined angles
+dist_min = 30 # Minimum safe distance to an obstacle (cm)
+d180 = 90 # Equivalent rotation distance for a 180-degree turn
+dturn = 60 # Equivalent rotation distance for smaller turns
+
def find_ball():
- time.sleep(0.5)
- found = 0
+ """
+ Locates the ball by rotating the robot's head and measuring distances.
+
+ Steps:
+ 1. Rotate the head to predefined angles and measure distances.
+ 2. Detect the presence of a ball in the camera feed.
+ 3. If the ball is detected and within an acceptable distance, adjust the robot's position to face it.
+ """
+ time.sleep(0.5) # Pause briefly before starting the search
+ found = 0 # Flag to indicate if the ball was found
+
+ # Perform two search cycles
for n in range(2):
+ # In the second cycle, turn the robot based on distance measurements
if n == 1:
- if dist[1] > dist[2]:
- cmd(car, do = 'move', where = 'right', at = speed)
+ if dist[1] > dist[2]: # Check distances to decide the turn direction
+ cmd(car, do='move', where='right', at=speed) # Move right
else:
- cmd(car, do = 'move', where = 'left', at = speed)
- time.sleep(d180/speed)
- cmd(car, do = 'stop')
+ cmd(car, do='move', where='left', at=speed) # Move left
+ time.sleep(d180 / speed) # Wait for the 180-degree turn to complete
+ cmd(car, do='stop') # Stop the robot
+
+ # Rotate the head to each predefined angle and measure distances
for i in range(3):
- cmd(car, do = 'rotate', at = ang[i])
- dist[i] = cmd(car, do = 'measure', what = 'distance')
- ball, bd, ba_rad, ba_deg = capture()
+ cmd(car, do='rotate', at=ang[i]) # Rotate head to the current angle
+ dist[i] = cmd(car, do='measure', what='distance') # Measure distance
+ ball, bd, ba_rad, ba_deg = capture() # Capture image and detect ball
+
+ # If a ball is detected, refine measurements
if ball:
- if ((i == 1 and ba_deg < -ang_tol) or
+ if ((i == 1 and ba_deg < -ang_tol) or
(i == 2 and ba_deg > +ang_tol)):
- # Rotate head more precisely to ball angle to measure distances
+ # Adjust head angle to align more precisely with the ball
um_ang = ang[i] - ba_deg
- cmd(car, do = 'rotate', at = um_ang)
- d = cmd(car, do = 'measure', what = 'distance')
- ball, bd, ba_rad, ba_deg = capture()
+ cmd(car, do='rotate', at=um_ang) # Rotate to the updated angle
+ d = cmd(car, do='measure', what='distance') # Measure distance
+ ball, bd, ba_rad, ba_deg = capture() # Re-capture and re-detect
else:
- um_ang = ang[i]
- d = dist[i]
- if not ball: continue
+ um_ang = ang[i] # Use the current angle
+ d = dist[i] # Use the measured distance
+
+ # If no ball is detected after adjustment, skip
+ if not ball:
+ continue
+
+ # If the detected ball is beyond the minimum safe distance
if d > dist_min:
- found = 1
- print('found ball: bdist =', round(bd,1), 'dist =', d)
- cmd(car, do = 'rotate', at = 90)
+ found = 1 # Mark ball as found
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#a1ff0a',
+ 'data': f"Ball found at {round(bd)} cm and {ba_deg} degrees",
+ }
+ )
+
+ # Rotate head back to the center
+ cmd(car, do='rotate', at=90)
+
+ # Calculate the steering angle to face the ball
steer_ang = 90 - um_ang + ba_deg
- if steer_ang > ang_tol:
- cmd(car, do = 'move', where = 'right', at = speed)
- elif steer_ang < -ang_tol:
- cmd(car, do = 'move', where = 'left', at = speed)
- print('steering angle =', steer_ang)
- time.sleep(dturn/speed*abs(steer_ang)/180)
- cmd(car, do = 'stop')
- time.sleep(0.5)
- _, bd, ba_rad, ba_deg = capture()
- break
+ if steer_ang > ang_tol: # If the angle is to the right
+ cmd(car, do='move', where='right', at=speed) # Move right
+ elif steer_ang < -ang_tol: # If the angle is to the left
+ cmd(car, do='move', where='left', at=speed) # Move left
+
+ # Log the steering angle and adjust position
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#a1ff0a',
+ 'data': f"Steering angle: {steer_ang} degrees",
+ }
+ )
+ time.sleep(dturn / speed * abs(steer_ang) / 180) # Adjust position
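+            # e.g. (illustrative) a 45-degree correction sleeps 60/100 * 45/180 = 0.15 s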
+ cmd(car, do='stop') # Stop the robot
+ time.sleep(0.5) # Pause briefly
+ _, bd, ba_rad, ba_deg = capture() # Re-capture the image
+
+ break # Exit the current angle loop once the ball is found
+
+ # Exit the main search loop if the ball is found
if found:
break
+
+ # If the ball is not found, reset head position
if not found:
- cmd(car, do = 'rotate', at = 90)
+ cmd(car, do='rotate', at=90) # Rotate head back to the center
+
+
-#%% Track the ball
+
+#%% Function to track the ball
def track_ball():
+ """
+ Tracks the ball by calculating the turning radius and adjusting wheel speeds.
+ """
+ # Capture the current image and check for the ball
ball, bd, ba_rad, ba_deg = capture()
if ball:
- # Calculate left and right wheel speeds to reach the ball
- r = bd/(2*np.sin(ba_rad)) # required turning radius
- if r > 0 and r <= 707: # turn right
- s0 = 1.111
- ra = -17.7
- rb = 98.4
- else: # turn left or go straight
- s0 = 0.9557 # vl/vr speed ratio to go straight
- ra = 5.86
- rb = -55.9
- speed_ratio = s0*(r - ra)/(r + rb)
- speed_ratio = max(0, speed_ratio)
- if r > 0 and r <= 707: # turn right
- lspeed = speed
- rspeed = round(speed*speed_ratio)
- else: # turn left or go straight
- lspeed = round(speed*speed_ratio)
- rspeed = speed
- cmd(car, do = 'set', at = [rspeed, lspeed])
-
-#%% Main
-cmd(car, do = 'rotate', at = 90)
+ # Calculate the turning radius needed to approach the ball
+ r = bd / (2 * np.sin(ba_rad)) # Required turning radius
+ if r > 0 and r <= 707: # If the radius indicates a right turn
+ s0 = 1.111 # Speed ratio for turning right
+ ra = -17.7 # Radius offset for right turns
+ rb = 98.4 # Radius factor for right turns
+ else: # For left turns or moving straight
+ s0 = 0.9557 # Speed ratio to move straight
+ ra = 5.86 # Radius offset for left turns
+ rb = -55.9 # Radius factor for left turns
+
+ # Calculate speed ratio for the left and right wheels
+ speed_ratio = s0 * (r - ra) / (r + rb) # Adjust speed based on radius
+ speed_ratio = max(0, speed_ratio) # Ensure speed ratio is non-negative
+
+ # Determine wheel speeds based on the turning direction
+ if r > 0 and r <= 707: # Right turn
+ lspeed = speed # Left wheel speed (full speed)
+ rspeed = round(speed * speed_ratio) # Right wheel speed
+ else: # Left turn or moving straight
+ lspeed = round(speed * speed_ratio) # Left wheel speed
+ rspeed = speed # Right wheel speed
+
+ # Send the speed command to the robot
+ cmd(car, do='set', at=[rspeed, lspeed])
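+
+        # Worked example (illustrative): bd = 50 cm at ba_deg = 20 gives
+        # r = 50 / (2 * sin(0.349)) ≈ 73, a right turn, so
+        # speed_ratio = 1.111 * (73 + 17.7) / (73 + 98.4) ≈ 0.59 and the
+        # wheels are set to lspeed = 100, rspeed = 59.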
+
+
+
+
+#%% Main logic
+# Start by centering the robot's head
+cmd(car, do='rotate', at=90)
+
+# Find the ball before starting the loop
find_ball()
+
+# Infinite loop to track the ball and handle obstacles
while 1:
- # Check if car was lifted off the ground to interrupt the while loop
- if cmd(car, do = 'check'):
- break
- # Track the ball
+ # Check if the robot has been lifted off the ground
+ if cmd(car, do='check'):
+ break # Exit the loop if the robot is lifted
+
+ # Track the ball and adjust movement
track_ball()
- # Check distance to obstacle
- if cmd(car, do = 'measure', what = 'distance') <= dist_min:
- # Detected an obstacle, stop
- cmd(car, do = 'stop')
- # Find the ball
+
+ # Measure the distance to obstacles
+ if cmd(car, do='measure', what='distance') <= dist_min:
+ # If an obstacle is detected, stop the robot
+ cmd(car, do='stop')
+ # Re-locate the ball after stopping
find_ball()
-#%% Close socket
-car.close()
\ No newline at end of file
+#%% Close socket connection
+car.close() # Close the connection to the robot's WiFi
diff --git a/Application/docker/Dockerfile b/Application/docker/Dockerfile
deleted file mode 100644
index e69de29..0000000
diff --git a/Application/docker/Dockerfile.app1 b/Application/docker/Dockerfile.app1
new file mode 100644
index 0000000..6bcbf26
--- /dev/null
+++ b/Application/docker/Dockerfile.app1
@@ -0,0 +1,27 @@
+# Use Python 3.11 Slim as base image
+FROM python:3.11-slim
+
+# Set the working directory
+WORKDIR /app
+
+# Copy requirements.txt file to the container
+COPY requirements.txt /app/
+
+# Install OpenCV's native runtime dependencies
+RUN apt-get update && apt-get install -y \
+    libgl1-mesa-glx libglib2.0-0 libsm6 libxrender1 libxext6
+
+# Install dependencies
+RUN python -m venv venv && \
+ . venv/bin/activate && \
+ pip install --no-cache-dir -r requirements.txt
+
+# Expose port 5050
+EXPOSE 5050
+
+# Copy the application code and assets into /app
+COPY /static /app/static
+COPY /templates /app/templates
+COPY /obstacle_tracking.py /app/app.py
+
+# Run the application
+CMD ["/bin/bash", "-c", ". venv/bin/activate && python app.py"]
diff --git a/Application/docker/Dockerfile.app2 b/Application/docker/Dockerfile.app2
new file mode 100644
index 0000000..36a64df
--- /dev/null
+++ b/Application/docker/Dockerfile.app2
@@ -0,0 +1,40 @@
+# Use Python 3.11 Slim as base image
+FROM python:3.11-slim
+
+# Set the working directory
+WORKDIR /app
+
+# Copy requirements.txt file to the container
+COPY requirements.txt /app/
+
+# Install the Qt/X11 runtime libraries required by OpenCV's GUI stack
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ libqt5gui5 \
+ libqt5widgets5 \
+ libqt5core5a \
+ libxcb-xinerama0 \
+ libxcb1 \
+ libxkbcommon-x11-0 \
+ libglib2.0-0 \
+ libgl1-mesa-glx \
+ libfontconfig1 \
+ libdbus-1-3
+
+# Clean up the apt cache
+RUN apt-get clean && rm -rf /var/lib/apt/lists/*
+
+# Install dependencies
+RUN python -m venv venv && \
+ . venv/bin/activate && \
+ pip install --no-cache-dir -r requirements.txt
+
+# Expose port 5050
+EXPOSE 5050
+
+# Copy the application code and assets into /app
+COPY /static /app/static
+COPY /templates /app/templates
+COPY /color_ball_tracker.py /app/app.py
+
+# Run the application
+CMD ["/bin/bash", "-c", ". venv/bin/activate && python app.py"]
diff --git a/Application/docker/docker-compose.yaml b/Application/docker/docker-compose.yaml
index e69de29..d893e95 100644
--- a/Application/docker/docker-compose.yaml
+++ b/Application/docker/docker-compose.yaml
@@ -0,0 +1,24 @@
+version: '3.8'
+
+services:
+ app-1:
+ build:
+ context: ../ # Specify the context of the build
+ dockerfile: docker/Dockerfile.app1 # Specify the Dockerfile to use
+ ports:
+ - "5050:5050" # Map port 5000 in the container to port 5050 on the host
+ networks:
+ - flask-network # Connect the container to the flask-network
+
+ app-2:
+ build:
+ context: ../ # Specify the context of the build
+ dockerfile: docker/Dockerfile.app2 # Specify the Dockerfile to use
+    ports:
+      - "5051:5050" # Map container port 5050 to host port 5051 (avoids clashing with app-1)
+ networks:
+ - flask-network # Connect the container to the flask-network
+
+networks:
+ flask-network:
+ driver: bridge # Use the bridge network driver
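+
+# Usage sketch (run from this docker/ directory):
+#   docker compose up --build
+# app-1 is then reachable on host port 5050 and app-2 on host port 5051.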
diff --git a/Application/obstacle_tracking.py b/Application/obstacle_tracking.py
index 7baf65e..4758626 100644
--- a/Application/obstacle_tracking.py
+++ b/Application/obstacle_tracking.py
@@ -1,3 +1,15 @@
+"""
+Obstacle Tracking and Avoidance with Object Detection and Tracking
+Created by: Michael Bozall,
+Date: 2021-08-25,
+Modified by: Rodrigo Barba,
+Date: 2024-11-14,
+Description: This script demonstrates how to track and avoid obstacles using object detection and tracking with a camera mounted on a car. The car is controlled via a socket connection to send commands and receive responses. The script uses OpenCV for image processing and object detection, and a Flask server to stream the camera feed to a web interface. The car moves forward until an obstacle is detected, then it stops and evades the obstacle by turning left or right based on the available space. The car continues moving forward after evading the obstacle. The script also includes a check to stop the car if it is lifted off the ground to prevent damage to the motors.
+"""
+
+
+
+
# Load modules
import re
import sys
@@ -11,11 +23,14 @@
from flask_socketio import SocketIO, emit
from flask import Flask, Response, render_template
+
+
+
# Flask setup
app = Flask(__name__)
# Initialize SocketIO
-socketio = SocketIO(app)
+socketio = SocketIO(app, cors_allowed_origins="*")
# Send a command and receive a response
cmd_no = 0
@@ -39,7 +54,6 @@ def capture():
def capture_image():
"""
Capture the image using the camera and return it.
- This is the existing capture() function you provided.
"""
global cmd_no
cam = urlopen('http://192.168.4.1/capture')
@@ -49,6 +63,9 @@ def capture_image():
return img
+
+
+
@app.route('/video_feed')
def video_feed():
"""
@@ -67,13 +84,13 @@ def generate():
time.sleep(0.1) # Sleep to reduce CPU usage
return Response(generate(), mimetype='multipart/x-mixed-replace; boundary=frame')
-@app.route('/console_log')
+@app.route('/')
def console_log():
- return render_template('console.html')
+ return render_template('app_1.html')
# Start the Flask app in a separate thread
def start_flask():
- socketio.run(app, host='0.0.0.0', port=5050)
+ socketio.run(app, host='0.0.0.0', port=5050, allow_unsafe_werkzeug=True)
# Start Flask server in a new thread
flask_thread = threading.Thread(target=start_flask)
@@ -81,10 +98,14 @@ def start_flask():
flask_thread.start()
# Start the camera capture in a separate thread
+
capture_thread = threading.Thread(target=capture)
capture_thread.daemon = True # Daemonize the thread to allow the main program to exit
capture_thread.start()
+
+
+
def cmd(sock, do, what='', where='', at=''):
"""
Sends a command to the car and waits for a response.
@@ -134,7 +155,14 @@ def cmd(sock, do, what='', where='', at=''):
try:
sock.send(msg_json.encode())
except:
- socketio.emit('console', f"{cmd_no}: {do} {what} {where} {at}")
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'cmd',
+ 'color': '#ff0000',
+ 'data': f"Error: {sys.exc_info()[0]}",
+ }
+ )
sys.exit()
# Wait for the response
@@ -151,9 +179,19 @@ def cmd(sock, do, what='', where='', at=''):
res = round(int(res) * 1.3, 1) # Correct distance with a factor
else:
res = int(res)
- socketio.emit('console', f"{cmd_no}: {do} {what} {where} {at}")
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'cmd',
+ 'color': '#a1ff0a',
+ 'data': f"{cmd_no}: {do} {what} {where} {at}: {res}",
+ }
+ )
return res
+
+
+
# Connect to car's WiFi
ip = "192.168.4.1"
port = 100
@@ -164,18 +202,59 @@ def cmd(sock, do, what='', where='', at=''):
try:
car.connect((ip, port))
except:
- print("Error:", sys.exc_info()[0])
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#ff0000',
+ 'data': f"Error: {sys.exc_info()[0]}",
+ }
+ )
+
sys.exit()
-print("Connected!")
+
+socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#a1ff0a',
+ 'data': f"Connected to {ip}:{port}",
+ }
+)
# Read first data from socket
-print(f"Receive from {ip}:{port}")
+socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#ff8700',
+ 'data': "Reading data from the socket...",
+ }
+)
try:
data = car.recv(1024).decode() # Receive data from the car
except:
- print("Error:", sys.exc_info()[0])
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+            'color': '#ff0000',
+ 'data': f"Error: {sys.exc_info()[0]}",
+ }
+ )
sys.exit()
-print("Received:", data)
+
+socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#a1ff0a',
+ 'data': f"Data received: {data}",
+ }
+)
+
+
+
# Evasion of obstacles
speed = 100 # Car speed
@@ -188,7 +267,14 @@ def evade_obstacle():
"""
Handles obstacle evasion with smarter behavior to avoid getting stuck in corners or retrying unnecessary actions.
"""
- print("Obstacle detected! Evading...")
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#147df5',
+ 'data': "Obstacle detected. Evading...",
+ }
+ )
cmd(car, do='stop') # Stop the car
# Rotate the sensor to left and right to measure distances
@@ -199,38 +285,95 @@ def evade_obstacle():
# Evaluate distances and decide direction
if dist[1] > dist_min and dist[2] > dist_min: # Both sides clear
- print("Clear on both sides. Moving forward.")
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#580aff',
+ 'data': "Both sides clear. Moving forward.",
+ }
+ )
cmd(car, do='move', where='forward', at=speed)
elif dist[1] > dist_min: # More space to the left
- print("Turning left to avoid obstacle.")
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#be0aff',
+ 'data': "Turning left to avoid obstacle.",
+ }
+ )
cmd(car, do='move', where='left', at=speed)
time.sleep(0.5)
# Check if left turn was successful and has enough space to continue
left_check = cmd(car, do='measure', what='distance')
if left_check > dist_min:
- print("Space cleared after left turn, continuing.")
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#be0aff',
+ 'data': "Space cleared after left turn, continuing.",
+ }
+ )
cmd(car, do='move', where='forward', at=speed)
else:
- print("No space after left turn. Moving backward.")
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#be0aff',
+ 'data': "No space after left turn. Moving backward.",
+ }
+ )
+
cmd(car, do='move', where='back', at=speed)
time.sleep(0.5)
elif dist[2] > dist_min: # More space to the right
- print("Turning right to avoid obstacle.")
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#0aefff',
+ 'data': "Turning right to avoid obstacle.",
+ }
+ )
cmd(car, do='move', where='right', at=speed)
time.sleep(0.5)
# Check if right turn was successful and has enough space to continue
right_check = cmd(car, do='measure', what='distance')
if right_check > dist_min:
- print("Space cleared after right turn, continuing.")
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#0aefff',
+ 'data': "Space cleared after right turn, continuing.",
+ }
+ )
cmd(car, do='move', where='forward', at=speed)
else:
- print("No space after right turn. Moving backward.")
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#0aefff',
+ 'data': "No space after right turn. Moving backward.",
+ }
+ )
cmd(car, do='move', where='back', at=speed)
time.sleep(0.5)
else: # No space on either side, move backward
- print("No clear path. Moving backward.")
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#0aff99',
+ 'data': "No space on either side. Moving backward.",
+ }
+ )
cmd(car, do='move', where='back', at=speed)
time.sleep(0.5)
@@ -240,7 +383,14 @@ def evade_obstacle():
front_distance = cmd(car, do='measure', what='distance')
if front_distance > dist_min or attempt > 3: # Path cleared or too many failed attempts
break
- print("Still stuck, retrying evasive action...")
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#ffd300',
+ 'data': "Obstacle still in front. Moving backward.",
+ }
+ )
cmd(car, do='move', where='back', at=speed)
time.sleep(0.5)
attempt += 1
@@ -248,6 +398,8 @@ def evade_obstacle():
cmd(car, do='stop') # Stop after avoiding obstacle
+
+
# Main loop
cmd(car, do='rotate', at=90) # Ensure sensor starts centered
cmd(car, do='move', where='forward', at=speed) # Start moving forward
@@ -256,7 +408,14 @@ def evade_obstacle():
while True:
# Check if car was lifted off the ground to interrupt the loop
if cmd(car, do='check'):
- print("Car was lifted. Stopping...")
+ socketio.emit(
+ 'console',
+ {
+ 'type': 'action',
+ 'color': '#ff0000',
+ 'data': "Car was lifted off the ground. Stopping...",
+ }
+ )
break
# Check distance to obstacles
diff --git a/Application/requirements.txt b/Application/requirements.txt
index bff1194..05f08b7 100644
Binary files a/Application/requirements.txt and b/Application/requirements.txt differ
diff --git a/Application/static/css/styles.css b/Application/static/css/styles.css
new file mode 100644
index 0000000..9698ac3
--- /dev/null
+++ b/Application/static/css/styles.css
@@ -0,0 +1,11 @@
+body {
+ margin: 0;
+ padding: 0;
+ background-color: black;
+}
+
+.img-fluid {
+ border-radius: 20px;
+ width: 40%;
+ height: auto;
+}
\ No newline at end of file
diff --git a/Application/static/css/terminalize.css b/Application/static/css/terminalize.css
new file mode 100644
index 0000000..0d83fd9
--- /dev/null
+++ b/Application/static/css/terminalize.css
@@ -0,0 +1,785 @@
+@font-face {
+ font-family: 'Terminal';
+ src: url('../fonts/terminal.ttf');
+}
+
+.terminalize {
+ --color-primary: #00c800;
+ --color-secondary: #000000;
+ --color-tertiary: #007800;
+ --color-quaternary: #001900;
+ --border-style: solid;
+ --border-style-secondary: dashed;
+ --font: 'Terminal', monospace;
+ --post-width: 38rem;
+ --filter: sepia(100%) hue-rotate(90deg);
+
+ font-family: var(--font);
+ font-size: 1rem;
+ color: var(--color-primary);
+ text-shadow: var(--color-primary) 0 -0.1rem 2rem;
+ box-sizing: border-box;
+}
+
+.terminalize .screen {
+ background-color: var(--color-secondary);
+}
+
+.terminalize *,
+.terminalize *:before,
+.terminalize *:after {
+ box-sizing: inherit;
+}
+
+.terminalize img {
+ -webkit-filter: var(--filter);
+ filter: var(--filter);
+}
+
+.terminalize p {
+ text-align: justify;
+}
+
+.t-l {
+ text-align: left !important;
+}
+
+.t-r {
+ text-align: right !important;
+}
+
+.t-c {
+ text-align: center !important;
+}
+
+.terminalize.theme-blue {
+ --color-primary: #04d9ff;
+ --color-tertiary: #106877;
+ --color-quaternary: #050611;
+ --filter: sepia(100%) hue-rotate(180deg);
+}
+
+.terminalize.theme-orange {
+ --color-primary: #f0bf25;
+ --color-secondary: #2c2013;
+ --color-tertiary: #8e7117;
+ --color-quaternary: #110c05;
+ --filter: sepia(100%);
+}
+
+.terminalize input,
+.terminalize textarea,
+.terminalize button,
+.terminalize a {
+ color: var(--color-primary);
+ text-shadow: var(--color-primary) 0 -0.1rem 2rem;
+}
+
+.terminalize a:hover {
+ color: var(--color-secondary);
+ background-color: var(--color-primary);
+ outline: none;
+}
+
+.terminalize textarea::placeholder,
+.terminalize input::placeholder {
+ color: var(--color-tertiary);
+}
+
+.terminalize ::-moz-selection {
+ color: var(--color-secondary);
+ background: var(--color-primary);
+}
+
+.terminalize ::selection {
+ color: var(--color-secondary);
+ background: var(--color-primary);
+}
+
+.terminalize [contenteditable]:focus {
+ outline: 0px solid transparent;
+}
+
+.terminalize table {
+ margin: 1rem;
+ border-collapse: collapse;
+}
+
+.terminalize table.bordered {
+ border: 1px var(--border-style) var(--color-primary);
+}
+
+.terminalize table thead,
+.terminalize table tfoot {
+ background: var(--color-primary);
+ color: var(--color-secondary);
+ height: 2rem;
+ border-top: 1px var(--border-style) var(--color-primary);
+ border-bottom: 1px var(--border-style) var(--color-primary);
+}
+
+.terminalize table.simple thead,
+.terminalize table.simple tfoot {
+ background: var(--color-secondary);
+ color: var(--color-primary);
+ height: 2rem;
+}
+
+.terminalize table.simple thead {
+ border-top: 1px var(--border-style) var(--color-primary);
+ border-bottom: 1px var(--border-style) var(--color-primary);
+}
+
+.terminalize table.simple tfoot {
+ border-top: 1px var(--border-style) var(--color-primary);
+ border-bottom: 1px var(--border-style) var(--color-primary);
+}
+
+.terminalize table tr th,
+.terminalize table tr td {
+ text-align: left;
+ padding: 0.5rem 1rem;
+ border: none;
+}
+
+.terminalize table.highlight tbody tr:hover {
+ color: var(--color-secondary);
+ background: var(--color-primary);
+ cursor: pointer;
+ font-weight: bold;
+}
+
+.terminalize table tbody tr.highlighted:hover {
+ color: var(--color-primary);
+ background-color: var(--color-secondary);
+ font-weight: normal;
+}
+
+.terminalize table tbody tr.highlighted {
+ color: var(--color-secondary);
+ background: var(--color-primary);
+ font-weight: bold;
+}
+
+.terminalize .form-group {
+ padding: 1rem;
+ border: 1px var(--border-style) var(--color-primary);
+ margin-top: -1px;
+}
+
+.terminalize blockquote {
+ white-space: pre-line;
+ display: block;
+ unicode-bidi: embed;
+ color: var(--color-tertiary);
+ border-left: 1px solid var(--color-tertiary);
+ padding: 0.5rem 0 1rem 1rem;
+ text-align: left;
+}
+
+.terminalize pre.code {
+ background-color: var(--color-quaternary);
+ color: var(--color-primary);
+ max-width: var(--post-width);
+ margin: auto;
+ padding: 0.5rem 1rem;
+ border-radius: 5px;
+ line-height: 1.30769231;
+ position: relative;
+ overflow: hidden;
+ max-width: 100%;
+ white-space: break-spaces;
+}
+
+.terminalize pre.code div.title {
+ position: absolute;
+ top: 0;
+ left: 0;
+ white-space: normal;
+ border-radius: 5px 0 5px 0;
+ padding: 0.3rem 1rem;
+ background-color: var(--color-primary);
+ color: var(--color-quaternary);
+ font-family: var(--font);
+}
+
+.terminalize pre.code code {
+ font-family: var(--font);
+ overflow: auto;
+}
+
+.terminalize pre.code div.title.right {
+ left: unset;
+ right: 0;
+ border-radius: 0 5px 0 5px;
+}
+
+.terminalize .post {
+ margin: auto;
+ text-align: justify;
+}
+
+.terminalize span.cursor {
+ height: 1em;
+ border-left: .15em solid var(--color-primary);
+}
+
+.terminalize span.cursor.blink {
+ animation: blink-caret .75s step-end infinite;
+}
+
+.terminalize .pannel {
+ position: relative;
+ overflow: hidden;
+ flex: 1;
+ max-width: 1200px;
+ max-height: calc(100% - 2rem);
+ display: flex;
+ flex-direction: column;
+ justify-content: space-between;
+ margin: 1rem 0.5rem;
+ border: 1px var(--border-style) var(--color-primary);
+}
+
+.terminalize .pannel.fluid {
+ max-height: none;
+}
+
+.terminalize .pannel .pannel-header {
+ overflow: hidden;
+ display: flex;
+ flex-direction: row;
+ justify-content: space-around;
+ align-items: center;
+ text-align: left;
+ padding: 0;
+ border-bottom: 1px var(--border-style) var(--color-primary);
+ min-height: 2rem;
+}
+
+.terminalize .pannel .pannel-header>* {
+ padding: 0.5rem 1rem;
+}
+
+.terminalize .pannel .pannel-header .controls {
+ padding: 0;
+ flex: 0;
+ display: flex;
+ flex-direction: row;
+ border-left: 1px var(--border-style) var(--color-primary);
+}
+
+.terminalize .pannel .pannel-header .controls .control {
+    padding: 0.5rem;
+    min-width: 1.5rem;
+    text-align: center;
+}
+
+.terminalize .pannel .pannel-header .controls .control:hover {
+ cursor: pointer;
+ background-color: var(--color-primary);
+ color: var(--color-secondary);
+}
+
+.terminalize .pannel .pannel-body {
+ overflow-y: auto;
+ padding: 1rem;
+}
+
+.terminalize .pannel .pannel-footer {
+ overflow: hidden;
+ display: flex;
+ flex-direction: row;
+ justify-content: space-around;
+ align-items: center;
+ text-align: right;
+ border-top: 1px var(--border-style) var(--color-primary);
+ min-height: 2rem;
+}
+
+.terminalize .pannel .pannel-footer>* {
+ padding: 0.5rem 1rem;
+}
+
+.terminalize .rounded {
+ border-radius: 8px;
+}
+
+.terminalize .screen {
+ display: flex;
+ flex-direction: row;
+ justify-content: space-around;
+ align-items: center;
+ width: 100%;
+ height: 100%;
+ overflow: hidden;
+}
+
+.terminalize .row {
+ display: flex;
+ flex-direction: row;
+ justify-content: center;
+ align-items: flex-start;
+ flex-wrap: wrap;
+}
+
+.terminalize .col {
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ padding: 0 1rem;
+ flex: 1;
+ position: relative;
+ max-width: 100%;
+}
+
+.terminalize .type {
+ overflow: hidden;
+ white-space: nowrap;
+ animation: enlarge 3.5s steps(40, end);
+}
+
+.terminalize {
+ scrollbar-width: thin;
+ scrollbar-color: var(--color-primary) var(--color-secondary);
+}
+
+.terminalize ::-webkit-scrollbar {
+ width: 0.8rem;
+ /* width of the entire scrollbar */
+}
+
+.terminalize ::-webkit-scrollbar-track {
+ background: var(--color-secondary);
+ /* color of the tracking area */
+}
+
+.terminalize ::-webkit-scrollbar-thumb {
+ background-color: var(--color-primary);
+ /* color of the scroll thumb */
+ border-radius: 8px;
+ /* roundness of the scroll thumb */
+ border: 2px var(--border-style) var(--color-tertiary);
+ /* creates padding around scroll thumb */
+}
+
+.terminalize .a-s {
+ align-items: flex-start !important;
+}
+
+.terminalize .a-e {
+ align-items: flex-end !important;
+}
+
+.terminalize .a-c {
+ align-items: center !important;
+}
+
+.terminalize .a-auto {
+ align-items: center !important;
+}
+
+.terminalize .f-0 {
+ flex: 0 !important;
+}
+
+.terminalize .j-c {
+ justify-content: center;
+}
+
+.terminalize .j-l {
+ justify-content: left;
+}
+
+.terminalize .j-r {
+ justify-content: right;
+}
+
+.terminalize .j-a {
+    justify-content: space-around;
+}
+
+.terminalize .j-b {
+ justify-content: space-between;
+}
+
+.terminalize .w-100 {
+ width: 100%;
+}
+
+
+/* COMPONENTS */
+
+.terminalize .checkbox {
+ display: block;
+ position: relative;
+ padding-left: 1.8rem;
+ margin: 1rem 0;
+ cursor: pointer;
+ font-size: 1rem;
+ -webkit-user-select: none;
+ -moz-user-select: none;
+ -ms-user-select: none;
+ user-select: none;
+}
+
+.terminalize .checkbox input {
+ position: absolute;
+ opacity: 0;
+ cursor: pointer;
+ height: 0;
+ width: 0;
+}
+
+.terminalize .checkmark {
+ border: 1px var(--border-style) var(--color-primary);
+ position: absolute;
+ top: 0;
+ left: 0;
+ height: 16px;
+ width: 16px;
+ background-color: transparent;
+ box-sizing: border-box !important;
+}
+
+.terminalize .checkbox:hover input~.checkmark {
+ background-color: var(--color-secondary);
+}
+
+.terminalize .checkbox .checkmark>span {
+ background-color: transparent;
+ position: absolute;
+ height: 10px;
+ width: 10px;
+ left: 2px;
+ top: 2px;
+}
+
+.terminalize .checkbox input:checked~.checkmark>span {
+ background-color: var(--color-primary);
+}
+
+.terminalize .checkmark:after {
+ content: "";
+ position: absolute;
+ display: none;
+}
+
+.terminalize .checkbox input:checked~.checkmark:after {
+ display: block;
+}
+
+.terminalize input[type=text],
+.terminalize input[type=number] {
+ -webkit-user-select: none;
+ -moz-user-select: none;
+ -ms-user-select: none;
+ user-select: none;
+}
+
+.terminalize input[type=text],
+.terminalize input[type=number] {
+ display: block;
+ position: relative;
+ margin: 0.5rem 0;
+ font-size: 1rem;
+ outline: 1px var(--border-style) var(--color-primary);
+ -webkit-appearance: none;
+ -moz-appearance: none;
+ appearance: none;
+ margin: 0;
+ border: 0;
+ padding: 1rem;
+ display: inline-block;
+ white-space: normal;
+ background: none;
+ line-height: 1;
+ color: var(--color-primary);
+ background-color: var(--color-secondary);
+ font-family: var(--font);
+}
+
+.terminalize textarea {
+ -webkit-appearance: none;
+ -moz-appearance: none;
+ min-height: 10rem;
+ min-width: 10rem;
+ width: 100%;
+ appearance: none;
+ padding: 1rem;
+ font-size: 1rem;
+ border: 1px var(--border-style) var(--color-primary);
+ background-color: var(--color-secondary);
+ font-family: var(--font);
+ color: var(--color-primary);
+}
+
+.terminalize textarea:focus-visible {
+ outline: none;
+}
+
+.terminalize .inverse {
+ background-color: var(--color-primary) !important;
+ color: var(--color-secondary) !important;
+ font-weight: bold;
+}
+
+.terminalize .b-alt {
+ border-style: var(--border-style-secondary) !important;
+}
+
+.terminalize progress {
+ -moz-appearance: none;
+ -webkit-appearance: none;
+ appearance: none;
+ border: 1px var(--border-style) var(--color-primary);
+
+}
+
+.terminalize ::-webkit-progress-bar {
+ background-color: var(--color-secondary);
+}
+
+.terminalize ::-moz-progress-bar {
+ background-color: var(--color-secondary);
+}
+
+.terminalize ::-webkit-progress-value {
+ background-color: var(--color-primary);
+}
+
+.terminalize ::-moz-progress-value {
+ background-color: var(--color-primary);
+}
+
+.terminalize .btn {
+ display: inline-block;
+ border: 1px var(--border-style) var(--color-primary);
+ padding: 0.8rem 1rem;
+ min-width: 8rem;
+ text-align: center;
+}
+
+.terminalize .btn.active {
+ background-color: var(--color-primary);
+ color: var(--color-secondary);
+ font-weight: bold;
+}
+
+.terminalize .btn:focus,
+.terminalize .btn:hover,
+.terminalize .btn-primary:focus,
+.terminalize .btn:active,
+.terminalize .btn-primary:active {
+ background-color: var(--color-primary);
+ color: var(--color-secondary);
+ outline: none;
+ cursor: pointer;
+ user-select: none;
+ font-weight: bold;
+}
+
+.terminalize .btn.active:focus,
+.terminalize .btn.active:hover,
+.terminalize .btn.active-primary:focus,
+.terminalize .btn.active:active,
+.terminalize .btn.active:active {
+ background-color: var(--color-secondary) !important;
+ color: var(--color-primary) !important;
+ font-weight: normal;
+}
+
+.terminalize .select {
+ border: 1px var(--border-style) var(--color-primary);
+ height: 30px;
+ overflow: hidden;
+ max-width: 15rem;
+ position: relative;
+}
+
+.terminalize .select.no-border {
+ border: 0;
+}
+
+.terminalize .select::after {
+ content: "\025be";
+ font-size: 2rem;
+ color: var(--color-secondary);
+ display: table-cell;
+ text-align: center;
+ width: 30px;
+ height: 36px;
+ background-color: var(--color-primary);
+ position: absolute;
+ top: -6px;
+ right: 0px;
+ pointer-events: none;
+}
+
+.terminalize select {
+ width: 100%;
+ text-shadow: var(--color-primary) 0 -0.1rem 2rem;
+ font-size: 1rem;
+ background: transparent;
+ color: var(--color-primary);
+ font-family: 'Terminal';
+ border: none;
+ font-size: 14px;
+ height: 30px;
+ padding: 5px;
+ padding-right: 2rem;
+}
+
+.terminalize select:focus {
+ outline: none;
+}
+
+@keyframes blink-caret {
+
+ from,
+ to {
+ border-color: transparent
+ }
+
+ 50% {
+ border-color: var(--color-primary);
+ }
+}
+
+@keyframes enlarge {
+ from {
+ width: 0
+ }
+
+ to {
+ width: 100%
+ }
+}
+
+/* SCANLINES */
+.scanlines {
+ position: relative;
+ overflow: hidden;
+}
+
+.scanlines:before,
+.scanlines:after {
+ display: block;
+ pointer-events: none;
+ content: "";
+ position: absolute;
+}
+
+.scanlines:before {
+ width: 100%;
+ height: 5px;
+ z-index: 2147483649;
+ background: rgba(0, 255, 0, 0.3);
+ opacity: 0.75;
+ -webkit-animation: scanline 6s linear infinite;
+ animation: scanline 6s linear infinite;
+}
+
+.scanlines:after {
+ top: 0;
+ right: 0;
+ bottom: 0;
+ left: 0;
+ z-index: 2147483648;
+ background: -webkit-linear-gradient(top,
+ transparent 50%,
+ rgba(0, 80, 0, 0.3) 51%);
+ background: linear-gradient(to bottom,
+ transparent 50%,
+ rgba(0, 80, 0, 0.3) 51%);
+ background-size: 100% 0.5vh;
+ -webkit-animation: scanlines 1s steps(60) infinite;
+ animation: scanlines 1s steps(60) infinite;
+}
+
+/* SCANLINE ANIMATION */
+@-webkit-keyframes scanline {
+ 0% {
+ -webkit-transform: translate3d(0, 200000%, 0);
+ transform: translate3d(0, 200000%, 0);
+ }
+}
+
+@keyframes scanline {
+ 0% {
+ -webkit-transform: translate3d(0, 200000%, 0);
+ transform: translate3d(0, 200000%, 0);
+ }
+}
+
+@-webkit-keyframes scanlines {
+ 0% {
+ background-position: 0 50%;
+ }
+}
+
+@keyframes scanlines {
+ 0% {
+ background-position: 0 50%;
+ }
+}
+
+.blink {
+ animation: blink 1s infinite;
+}
+
+@keyframes blink {
+ to {
+ opacity: 0;
+ }
+}
+
+/* RESPONSIVE */
+
+/* Small devices (portrait tablets and large phones, 600px and up) */
+@media only screen and (max-width: 600px) {
+ .terminalize blockquote {
+ margin: 0;
+ }
+
+ .terminalize .col {
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ padding: 0;
+ flex: 1;
+ position: relative;
+ }
+
+ .terminalize pre.code {
+ padding: 0 0 0 0.5rem;
+ max-width: 100%;
+ margin: 0;
+ }
+
+ .terminalize .pannel {
+ margin: 1rem 0rem;
+ }
+
+ .terminalize .pannel .pannel-body {
+ padding: 0.4rem;
+ }
+
+ .terminalize .row {
+ flex-direction: column;
+ align-items: center;
+ }
+
+ .terminalize table {
+ margin: 1rem 0rem;
+ }
+
+ .terminalize .a-auto {
+ align-items: flex-start !important;
+ }
+
+}
+
+/* Medium devices (landscape tablets, 768px and up) */
+@media only screen and (max-width: 768px) {}
\ No newline at end of file
diff --git a/Application/templates/app_1.html b/Application/templates/app_1.html
new file mode 100644
index 0000000..15a3a7b
--- /dev/null
+++ b/Application/templates/app_1.html
@@ -0,0 +1,148 @@
+
+
+
+
+
+
+ Obstacle Tracking Application
+
+
+
+
+
+
+
+
+
+
+
+
Obstacle Tracking Application
+
+
+
+
x
+
+
+
+
+
+
+
What is this? / What does it do?
+
+
+
+
+ Welcome to the Obstacle Tracking Application. This application is designed to
+ track obstacles in real time using an ultrasonic sensor. It detects obstacles,
+ provides a description of the detected object, and performs a specific action
+ based on the detected object.
+
+
+ The application provides a live feed of the camera; the output of the actions
+ and the logs are displayed in the console below.
+
+
+ The application is built with Python and Flask on the backend and HTML, CSS,
+ and JavaScript on the frontend. It uses Socket.IO to communicate between the
+ server and the client.
+
+
+
- Love from Angel, Mae and Rodrigo ˗ˋˏ ♡ ˎˊ˗
+
+
+
+
+
+
+
Camera Feed
+
+
+
+
+
+
+
+
+
+
+
Action Output
+
+
+
+
+
+
+
+
+
+
+
Log Output
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Application/templates/app_2.html b/Application/templates/app_2.html
new file mode 100644
index 0000000..974c6d9
--- /dev/null
+++ b/Application/templates/app_2.html
@@ -0,0 +1,147 @@
+
+
+
+
+
+
+ Color Ball Tracking Application
+
+
+
+
+
+
+
+
+
+
+
+
Color Ball Tracking Application
+
+
+
+
x
+
+
+
+
+
+
+
What is this? / What does it do?
+
+
+
+
+ Welcome to the Color Ball Tracking Application. This application is designed to
+ track colored balls in real time using a camera. When a detected ball is the
+ right color the robot drives to it; otherwise the ball is ignored.
+
+
+ The application provides a live feed of the camera; the output of the actions
+ and the logs are displayed in the console below.
+
+
+ The application is built with Python and Flask on the backend and HTML, CSS,
+ and JavaScript on the frontend. It uses Socket.IO to communicate between the
+ server and the client.
+
+
+
- Love from Angel, Mae and Rodrigo ˗ˋˏ ♡ ˎˊ˗
+
+
+
+
+
+
+
Camera Feed
+
+
+
+
+
+
+
+
+
+
+
Action Output
+
+
+
+
+
+
+
+
+
+
+
Log Output
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Application/templates/console.html b/Application/templates/console.html
deleted file mode 100644
index 2ee91e2..0000000
--- a/Application/templates/console.html
+++ /dev/null
@@ -1,69 +0,0 @@
-
-
-
-
-
-
- Car Control
-
-
-
-
-
-
-
-
Car Control and Live Feed
-
-
-
-
Live Video Feed
-
-
-
-
-
-
Console Output
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/README.md b/README.md
index d3a3835..8269a5d 100644
--- a/README.md
+++ b/README.md
@@ -1,49 +1,83 @@
-# 🤖 Autonomous Robot with Colored Ball Recognition
+# 🤖 Autonomous Robot with Obstacle Detection and Colored Ball Recognition
-This project aims to develop an **autonomous robot** using a **Raspberry Pi** together with the **Elegoo Smart Robot Car V4.0** kit. The robot is designed to recognize and follow colored balls, using **Python** and **OpenCV** to process the images captured by the robot's camera.
+This project aims to develop an **autonomous robot** using a **Raspberry Pi** together with the **Elegoo Smart Robot Car V4.0** kit. The robot detects colored balls in its surroundings using **computer vision** and follows them autonomously while avoiding obstacles in its path. The system uses the Smart Robot Car's camera to capture images in real time and processes them with **OpenCV** to identify the colored balls. Once a ball is detected, the robot calculates the distance and angle to it and adjusts its trajectory to follow it. The robot can also detect nearby obstacles and stop to avoid collisions. The project combines concepts from mobile robotics, computer vision, and autonomous control to build an intelligent, autonomous robot.
+
+---
## Main Features:
- **Computer Vision:** Uses **OpenCV** to capture images from the camera and process them in real time to identify balls of specific colors.
- **Autonomous Movement:** The robot moves toward the detected colored ball, adapting its trajectory to reach it using distance and angle calculations based on the camera data.
- **Robot Control:** The robot's movement and orientation are controlled through commands sent to the Elegoo Smart Robot Car V4.0 main board, with precise control of speed and turning angle.
-- **Hardware:** Uses a **Raspberry Pi** as the brain of the system, connected to the camera and controlling the Elegoo Smart Robot Car V4.0. The robot is powered by a rechargeable battery, allowing it to run autonomously for several hours.
+- **Hardware:** Uses a **Raspberry Pi** as the brain of the system: the Smart Robot captures frames from the camera and sends them to the Raspberry Pi, which processes them and makes the movement decisions; these decisions are sent back to the Smart Robot to execute the movement.
+- **Obstacle Detection:** The robot can detect nearby obstacles using an ultrasonic sensor and stop to avoid collisions, resuming its movement once the obstacle has been cleared (a minimal sketch of this logic follows this list).
+
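+A minimal Python sketch of the stop-and-resume behavior described above (the helper functions and the 20 cm threshold are placeholders for illustration, not the project's actual code):
+
+```python
+import time
+
+SAFE_DISTANCE_CM = 20  # assumed threshold; tune for the real robot
+
+def read_distance_cm():
+    """Hypothetical helper: return the ultrasonic sensor reading in centimeters."""
+    return 100.0  # stub value so the sketch runs standalone
+
+def send_command(cmd):
+    """Hypothetical helper: send a drive command to the Smart Robot."""
+    print(cmd)
+
+def drive_loop(iterations=3):
+    # Stop when an obstacle is closer than the safe distance; otherwise keep moving.
+    for _ in range(iterations):
+        if read_distance_cm() < SAFE_DISTANCE_CM:
+            send_command("stop")
+        else:
+            send_command("forward")
+        time.sleep(0.05)  # poll the sensor ~20 times per second
+
+drive_loop()
+```
+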
+---
## Components Used:
- **Raspberry Pi:** Development board that acts as the robot's main controller, running the Python scripts and handling the computer-vision operations.
-- **Elegoo Smart Robot Car V4.0:** Robot kit with motors, wheels, and proximity sensors, used for the robot's movement and navigation.
-- **Python & OpenCV:** Programming language and computer-vision library for processing the images, detecting colored balls, and computing the robot's position relative to the ball.
-- **Camera:** Camera connected to the Raspberry Pi to capture images and detect colors in real time.
+
+- **Elegoo Smart Robot Car V4.0:** Robot kit with Elegoo motors, wheels, and proximity sensors, used for the robot's movement and navigation.
+- **Python & OpenCV:** Programming language and computer-vision library for processing the images, detecting colored balls, and computing the robot's position relative to the ball, as well as controlling the robot's movement.
+- **Camera:** ESP32-S3 camera that captures images in real time for colored-ball detection (a minimal frame-fetching sketch follows this list).
+
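+As an illustration of this capture pipeline, a minimal sketch of fetching and decoding a single frame over HTTP (the URL is a placeholder; the real endpoint depends on the camera firmware):
+
+```python
+import cv2 as cv
+import numpy as np
+from urllib.request import urlopen
+
+CAMERA_URL = "http://192.168.4.1/capture"  # placeholder address, not the project's actual endpoint
+
+def fetch_frame(url=CAMERA_URL):
+    # Read the raw JPEG bytes from the camera endpoint and decode them
+    # into a BGR image that OpenCV can process.
+    data = urlopen(url, timeout=2).read()
+    buf = np.frombuffer(data, dtype=np.uint8)
+    return cv.imdecode(buf, cv.IMREAD_COLOR)
+```
+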
+---
## Robot Functionalities:
-1. **Colored-ball detection:** The robot can recognize colored balls using a color filter in the HSV color space. Once a ball is detected, it computes the distance and angle to it.
-2. **Moving toward the target:** The robot moves toward the detected ball, adjusting its direction according to the computed angle and distance.
-3. **Obstacle evasion:** Besides following colored balls, the robot can detect nearby obstacles with its distance sensors, stopping automatically to avoid collisions.
-4. **Camera rotation:** The robot's camera can rotate to sweep the scene and find colored balls in different positions.
+1. **Colored-Ball Detection:** The robot can detect balls of specific colors in its surroundings using computer vision and OpenCV.
+2. **Ball Tracking:** Once a colored ball is detected, the robot computes the distance and angle to it and adjusts its trajectory to follow it (see the sketch after this list).
+3. **Obstacle Avoidance:** The robot can detect nearby obstacles using an ultrasonic sensor and stop to avoid collisions.
+4. **Movement Control:** The robot can move forward, move backward, turn left, and turn right, controlling both speed and turning angle.
+
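+A minimal sketch of the detection-and-geometry step behind functionalities 1 and 2 (the HSV range, focal length, and ball radius below are illustrative assumptions, not the project's calibrated values):
+
+```python
+import math
+import cv2 as cv
+import imutils
+import numpy as np
+
+LOWER = np.array([100, 120, 70], dtype="uint8")   # assumed HSV range for a blue-ish ball
+UPPER = np.array([130, 255, 255], dtype="uint8")
+FOCAL_PX = 600.0       # assumed focal length in pixels
+BALL_RADIUS_CM = 3.5   # assumed real ball radius
+
+def locate_ball(frame):
+    """Return (distance_cm, angle_deg) to the largest matching blob, or None."""
+    hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
+    mask = cv.inRange(hsv, LOWER, UPPER)
+    contours = imutils.grab_contours(
+        cv.findContours(mask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE))
+    if not contours:
+        return None
+    (x, _), radius_px = cv.minEnclosingCircle(max(contours, key=cv.contourArea))
+    if radius_px < 5:
+        return None  # too small to be a reliable detection
+    # Pinhole model: apparent radius shrinks in proportion to distance.
+    distance_cm = FOCAL_PX * BALL_RADIUS_CM / radius_px
+    # The horizontal offset from the image center gives the steering angle.
+    angle_deg = math.degrees(math.atan2(x - frame.shape[1] / 2, FOCAL_PX))
+    return distance_cm, angle_deg
+```
+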
+---
## Learning Objectives:
-- Learn about mobile robotics and autonomous control.
-- Introduction to computer vision using OpenCV.
-- Hardware and software integration in a robotics project.
-- Development of algorithms for object detection and real-time tracking.
+- **Mobile Robotics:** Learn the basics of mobile robotics and how to build an autonomous robot using a Raspberry Pi and a robot kit.
+- **Computer Vision:** Understand how to use OpenCV to process images in real time and detect objects of interest in an environment.
+- **Autonomous Control:** Implement an autonomous control system that lets the robot make movement decisions based on the information captured by the camera and the sensors.
+
+---
-## Requirements:
+## Prerequisites:
- **Hardware:**
  - Raspberry Pi (Model 3B+ or higher recommended).
  - Elegoo Smart Robot Car V4.0.
- - Camera compatible with the Raspberry Pi.
- **Software:**
- - Python 3.x.
+ - Python 3.x (version 3.7 or higher is recommended).
  - OpenCV.
  - Required libraries for the Raspberry Pi and the Elegoo Smart Robot Car.
+ - Raspbian operating system installed on the Raspberry Pi.
+ - An IDE or code editor for programming in Python (e.g., Thonny, VS Code, etc.).
+ - Docker for building the containers.
+
+---
## Installation:
1. Clone this repository:
- ```bash
- git clone https://github.com/tu_usuario/robot_reconocimiento_bolitas.git
+ ```bash
+ git clone https://github.com/rodrigobarbaedu/color-ball-robot-tracker.git
+ cd color-ball-robot-tracker/Application/docker
+ ```
+
+2. (Requires Docker) Build the container images:
+   ```bash
+   docker-compose build --no-cache app-1 app-2
+   ```
+
+3. Start the containers (a minimal sketch of the assumed `docker-compose.yml` follows these steps):
+   ```bash
+   docker-compose up -d app-1 app-2
+   ```
+
+4. Open the Raspberry Pi's IP address in a web browser:
+   ```bash
+   http://<raspberry-pi-ip>:5050
+   ```
+
+5. Done! You can now use the robot's web interface.
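+
+For reference, a minimal sketch of the kind of `docker-compose.yml` the commands above assume (the service names come from the steps; the build contexts and port mappings are assumptions for illustration):
+
+```yaml
+services:
+  app-1:
+    build: ./app-1        # assumed build context
+    ports:
+      - "5050:5050"       # host port taken from step 4
+  app-2:
+    build: ./app-2        # assumed build context
+    ports:
+      - "5051:5050"       # assumed; avoids clashing with app-1
+```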