Merge pull request #21 from CapitalRobotics/adaptive
update package
torinriley authored Nov 18, 2024
2 parents c5745bc + 9517f86 commit d9397b7
Showing 11 changed files with 431 additions and 194 deletions.
117 changes: 117 additions & 0 deletions README.md
@@ -146,3 +146,120 @@ The Adaptive Task Prediction Model utilizes a TensorFlow Lite (TFLite) model for
- Scores all potential tasks to indicate confidence levels.
- The robot executes the predicted task with the highest score.


# ATEM: Adaptive Task Execution and Machine Learning Package Documentation

ATEM is a Python package designed for adaptive task execution in robotics and AI applications. It provides tools for training machine learning models, interpreting task sequences, and generating optimal task orders for various scenarios.

## Features
- **Adaptive Task Model**: Predict the next task based on sensor data and task history.
- **Task Training**: Train custom machine learning models using a `tasks.json` file.
- **Real-time Adaptation**: Simulate real-world scenarios for task execution.
- **Pathfinding Integration**: Extendable to work with A* pathfinding for robotics.
- **Lightweight TensorFlow Lite Integration**: For efficient model inference.

---

## Installation

```bash
pip install atem
```
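A quick import check after installation, assuming TensorFlow is pulled in as a dependency; the imported names come from `atem/__init__.py` later in this commit:

```python
# Sanity check: confirm the package and its main classes import cleanly.
from atem import AdaptiveModel, ModelTrainer

print("atem imported OK")
```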
---

## Quick Start Guide
1. **Preparing the `tasks.json` File**
The `tasks.json` file defines the tasks and their attributes.

Example:
```json
{
"tasks": [
{"name": "Task 1", "points": 10, "time": 5},
{"name": "Task 2", "points": 20, "time": 15},
{"name": "Task 3", "points": 15, "time": 10}
]
}
```
2. **Training a Model**
Use the `ModelTrainer` class to train a TensorFlow Lite model.

Example:
```python
from atem.model_train import ModelTrainer

trainer = ModelTrainer(tasks_file="tasks.json", output_model_path="adaptive_model.tflite")
trainer.train_and_save_model(epochs=20, batch_size=16)

```

3. **Interpreting Tasks**
Use the `AdaptiveModel` class to interpret task sequences and predict the next task.

Example:
```python
from atem import AdaptiveModel

model = AdaptiveModel(model_path="adaptive_model.tflite")

task_to_index = {"Task 1": 0, "Task 2": 1, "Task 3": 2}
index_to_task = {0: "Task 1", 1: "Task 2", 2: "Task 3"}
current_task = "Task 1"
sensor_data = {
    "time_elapsed": 20,
    "distance_to_target": 1.2,
    "gyro_angle": 45,
    "battery_level": 80
}

predicted_task, scores = model.predict_next_task(
    current_task=current_task,
    sensor_data=sensor_data,
    task_to_index=task_to_index,
    index_to_task=index_to_task,
    max_length=5
)

print(f"Predicted Next Task: {predicted_task}")
print(f"Task Scores: {scores}")

```

---

## API Reference

### 1. AdaptiveModel

`__init__(model_path: str)`
- Initialize the adaptive model with a TFLite model path.

`predict_next_task(...)`
- Predict the next task based on the current task and sensor data.
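Beyond single predictions, the `AdaptiveModel` class in `atem/core.py` (included in this commit) also provides `batch_predict(...)` and `save_predictions(...)`. A minimal sketch, reusing the task mappings from the Quick Start and assuming `adaptive_model.tflite` has already been trained:

```python
from atem import AdaptiveModel

model = AdaptiveModel(model_path="adaptive_model.tflite")

task_to_index = {"Task 1": 0, "Task 2": 1, "Task 3": 2}
index_to_task = {0: "Task 1", 1: "Task 2", 2: "Task 3"}

# Each pair is (current_task, sensor_data); the sensor fields follow the Quick Start example.
pairs = [
    ("Task 1", {"time_elapsed": 20, "distance_to_target": 1.2, "gyro_angle": 45, "battery_level": 80}),
    ("Task 2", {"time_elapsed": 35, "distance_to_target": 0.4, "gyro_angle": 10, "battery_level": 72}),
]

predictions = model.batch_predict(pairs, task_to_index, index_to_task, max_length=5)
model.save_predictions(predictions, output_file="predictions.json")
```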

### 2. ModelTrainer

`__init__(tasks_file: str, output_model_path: str)`
- Initialize the trainer with tasks and an output model path.

`train_and_save_model(epochs: int, batch_size: int)`
- Train the model and save it as a TFLite file.

`set_max_length(max_length: int)`
- Set the maximum sequence length for task encoding and padding.
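A short sketch combining the `ModelTrainer` methods above. The Quick Start trains without calling `set_max_length`, so a default presumably exists; it is called here only to keep the padding length in sync with the `max_length` passed at inference:

```python
from atem.model_train import ModelTrainer

trainer = ModelTrainer(tasks_file="tasks.json", output_model_path="adaptive_model.tflite")
trainer.set_max_length(5)  # match the max_length passed to predict_next_task at inference time
trainer.train_and_save_model(epochs=20, batch_size=16)
```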

5 changes: 5 additions & 0 deletions atem/__init__.py
@@ -0,0 +1,5 @@
from .core import AdaptiveModel
from .utils import load_tasks, create_task_encoder
from .model_train import ModelTrainer

__all__ = ["AdaptiveModel", "load_tasks", "create_task_encoder", "ModelTrainer"]
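`atem/__init__.py` also re-exports `load_tasks` and `create_task_encoder` from `atem.utils`, whose source is not part of this diff. The sketch below is therefore only an assumed usage pattern: it supposes `load_tasks(path)` returns the parsed task list and `create_task_encoder(tasks)` returns the `task_to_index` / `index_to_task` mappings used elsewhere in the README:

```python
# Assumed usage -- atem/utils.py is not shown in this commit, so the
# return values below are guesses, not the documented API.
from atem import load_tasks, create_task_encoder

tasks = load_tasks("tasks.json")                            # assumed: parsed list of task dicts
task_to_index, index_to_task = create_task_encoder(tasks)   # assumed: name <-> index mappings
```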
117 changes: 117 additions & 0 deletions atem/core.py
@@ -0,0 +1,117 @@
import tensorflow as tf
import numpy as np
import json


class AdaptiveModel:
    def __init__(self, model_path):
        """
        Initializes the AdaptiveModel with a TensorFlow Lite interpreter.
        Args:
            model_path (str): Path to the TFLite model file.
        """
        self.model_path = model_path
        self.interpreter = tf.lite.Interpreter(model_path=model_path)
        self.interpreter.allocate_tensors()

    def _prepare_input(self, current_task, sensor_data, task_to_index, max_length):
        """
        Prepares the input tensors for model inference.
        Args:
            current_task (str): Name of the current task.
            sensor_data (dict): Dictionary containing sensor readings.
            task_to_index (dict): Mapping of task names to indices.
            max_length (int): Maximum length for padding.
        Returns:
            tuple: Prepared task and sensor feature arrays.
        """
        encoded_task = [task_to_index[current_task]]
        padded_task = np.pad(encoded_task, (0, max_length - len(encoded_task)), constant_values=0).astype(np.float32).reshape(1, -1)

        sensor_features = np.array([
            sensor_data["time_elapsed"],
            sensor_data["distance_to_target"],
            sensor_data["gyro_angle"],
            sensor_data["battery_level"]
        ], dtype=np.float32).reshape(1, -1)

        return padded_task, sensor_features

    def predict_next_task(self, current_task, sensor_data, task_to_index, index_to_task, max_length):
        """
        Predicts the next task based on the current task and sensor data.
        Args:
            current_task (str): Name of the current task.
            sensor_data (dict): Dictionary containing sensor readings.
            task_to_index (dict): Mapping of task names to indices.
            index_to_task (dict): Mapping of task indices to names.
            max_length (int): Maximum length for padding.
        Returns:
            tuple: The predicted task name and raw output probabilities.
        """
        input_details = self.interpreter.get_input_details()
        output_details = self.interpreter.get_output_details()

        padded_task, sensor_features = self._prepare_input(current_task, sensor_data, task_to_index, max_length)

        self.interpreter.set_tensor(input_details[0]['index'], padded_task)
        if len(input_details) > 1:
            self.interpreter.set_tensor(input_details[1]['index'], sensor_features)

        self.interpreter.invoke()
        output = self.interpreter.get_tensor(output_details[0]['index'])[0]
        predicted_index = np.argmax(output)
        return index_to_task[predicted_index], output

    def batch_predict(self, task_sensor_pairs, task_to_index, index_to_task, max_length):
        """
        Predicts next tasks for a batch of inputs.
        Args:
            task_sensor_pairs (list of tuples): List of (current_task, sensor_data) pairs.
            task_to_index (dict): Mapping of task names to indices.
            index_to_task (dict): Mapping of task indices to names.
            max_length (int): Maximum length for padding.
        Returns:
            list of tuples: List of predicted tasks and their raw probabilities.
        """
        predictions = []
        for current_task, sensor_data in task_sensor_pairs:
            predicted_task, raw_output = self.predict_next_task(
                current_task, sensor_data, task_to_index, index_to_task, max_length
            )
            predictions.append((predicted_task, raw_output))
        return predictions

    def save_predictions(self, predictions, output_file="predictions.json"):
        """
        Saves predictions to a JSON file.
        Args:
            predictions (list of tuples): List of predictions to save.
            output_file (str): Path to the JSON file to save.
        """
        formatted_predictions = [
            {"predicted_task": pred[0], "raw_output": pred[1].tolist()} for pred in predictions
        ]
        with open(output_file, "w") as f:
            json.dump(formatted_predictions, f, indent=4)
        print(f"Predictions saved to {output_file}")

    def load_model(self, new_model_path):
        """
        Reloads the model with a new TFLite file.
        Args:
            new_model_path (str): Path to the new TFLite model file.
        """
        self.model_path = new_model_path
        self.interpreter = tf.lite.Interpreter(model_path=new_model_path)
        self.interpreter.allocate_tensors()
        print(f"Model reloaded from {new_model_path}")