Skip to content

Commit

Permalink
Nest maintenance example structure similar to foraging
Browse files Browse the repository at this point in the history
  • Loading branch information
aadeshnpn committed Feb 27, 2019
1 parent e1f804d commit 777e54d
Show file tree
Hide file tree
Showing 12 changed files with 370 additions and 94 deletions.
16 changes: 8 additions & 8 deletions examples/cooperative_transport_evolution/experiment.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,7 @@ def learning_phase(iteration, early_stop=False):
for i in range(iteration):
# Take a step in evolution
env.step()
print (env.stepcnt)
# print (env.stepcnt)
if (i + 1) % validation_step == 0:
try:
phenotypes = env.behavior_sampling_objects(ratio_value=0.1)
Expand Down Expand Up @@ -139,9 +139,9 @@ def learning_phase(iteration, early_stop=False):
env.pname, env.runid + '-' + 'all', allphenotypes)
try:
# return list(phenotypes.keys())
return phenotypes
return phenotypes, env.pname
except UnboundLocalError:
return None
return None, None


def phenotype_to_json(pname, runid, phenotypes):
Expand All @@ -153,11 +153,11 @@ def phenotype_to_json(pname, runid, phenotypes):
def main(iter):
"""Block for the main function."""
# Run the evolutionary learning algorithm
phenotypes = learning_phase(iter)
phenotypes, pname = learning_phase(iter)
# learning_phase(iter)
# Run the evolved behaviors on a test environment
if phenotypes is not None:
test_loop(phenotypes, 5000)
test_loop(phenotypes, parentname=pname, iteration=5000)


def test_json_phenotype(json):
Expand All @@ -167,14 +167,14 @@ def test_json_phenotype(json):
# print (phenotype)
# phenotype = ' '
if test_loop(phenotype, 2000):
print('foraging success')
print('success')


if __name__ == '__main__':
# Running 50 experiments in parallel
# Parallel(n_jobs=8)(delayed(main)(i) for i in range(2000, 100000, 2000))
# Parallel(n_jobs=4)(delayed(main)(i) for i in range(1000, 8000, 2000))
main(5000)
main(8000)
# json = '1543367322976111-5999.json'
# test_json_phenotype(json)
# Parallel(n_jobs=4)(delayed(main)(8000) for i in range(128))
# Parallel(n_jobs=16)(delayed(main)(16000) for i in range(16))
2 changes: 1 addition & 1 deletion examples/cooperative_transport_evolution/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ def __init__(
# If parent folder exits create inside it

if parent is not None and pathlib.Path(parent).is_dir():
self.pname = parent + '/' + self.runid + '-' + str(ratio)
self.pname = parent + '/' + self.runid + '-' + str(ratio) + name
else:
self.pname = '/'.join(
os.getcwd().split('/')[:-2]
Expand Down
2 changes: 1 addition & 1 deletion examples/cooperative_transport_evolution/world.json
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
{
"x":-40,
"y":-40,
"radius":15,
"radius":20,
"q_value":0.9
}
]
Expand Down
88 changes: 53 additions & 35 deletions examples/nest_maintainence_evolution/agent.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
"""Derived agent class."""

from swarms.lib.agent import Agent

import numpy as np
from swarms.lib.agent import Agent
from swarms.utils.bt import BTConstruct
from swarms.utils.results import Results # noqa : F401

Expand Down Expand Up @@ -126,12 +127,16 @@ def carrying_fitness(self):
This fitness supports the carrying behavior of
the agents.
"""
return len(self.attached_objects) * (self.timestamp)
return sum([obj.weight for obj in self.attached_objects])

def exploration_fitness(self):
"""Compute the exploration fitness."""
# Use exploration space as fitness values
return len(self.location_history) - 1
location = len(self.location_history)
if location == 0:
return 0
else:
return location - 1


class LearningAgent(NMAgent):
Expand All @@ -146,7 +151,7 @@ def __init__(self, name, model):
def init_evolution_algo(self):
"""Agent's GE algorithm operation defination."""
# Genetic algorithm parameters
self.operation_threshold = 2
self.operation_threshold = 50
self.genome_storage = []

# Grammatical Evolution part
Expand All @@ -169,6 +174,8 @@ def init_evolution_algo(self):
# Fitness
self.beta = 0.9
self.diversity_fitness = self.individual[0].fitness
self.individual[0].fitness = 0
self.generation = 0

def construct_bt(self):
"""Construct BT."""
Expand All @@ -180,14 +187,14 @@ def construct_bt(self):
def store_genome(self, cellmates):
"""Store the genome from neighbours."""
# cellmates.remove(self)
# self.genome_storage += [agent.individual[0] for agent in cellmates]
for agent in cellmates:
if agent.debris_collected > 0:
self.genome_storage += agent.individual
elif len(agent.attached_objects) > 0:
self.genome_storage += agent.individual
elif agent.exploration_fitness() > 10:
self.genome_storage += agent.individual
self.genome_storage += [agent.individual[0] for agent in cellmates]
# for agent in cellmates:
# if agent.debris_collected > 0:
# self.genome_storage += agent.individual
# elif len(agent.attached_objects) > 0:
# self.genome_storage += agent.individual
# elif agent.exploration_fitness() > 10:
# self.genome_storage += agent.individual

def exchange_chromosome(self,):
"""Perform genetic operations."""
Expand All @@ -198,7 +205,7 @@ def exchange_chromosome(self,):
new_pop = mutation(self.parameter, cross_pop)
new_pop = evaluate_fitness(new_pop, self.parameter)
individuals = replacement(self.parameter, new_pop, individuals)
individuals.sort(reverse=False)
individuals.sort(reverse=True)
self.individual = [individuals[0]]
self.individual[0].fitness = 0
self.genome_storage = []
Expand All @@ -223,6 +230,7 @@ def genetic_step(self):
self.location_history = set()
self.timestamp = 0
self.diversity_fitness = self.individual[0].fitness
self.generation += 1

def overall_fitness(self):
"""Compute complete fitness.
Expand All @@ -236,10 +244,12 @@ def overall_fitness(self):
# First block gives importance to exploration and when as soon
# food has been found, the next block will focus on dropping
# the food on hub
self.delayed_reward = self.beta * self.delayed_reward
self.individual[0].fitness = self.delayed_reward \
+ self.exploration_fitness() + self.carrying_fitness() \
+ self.debris_collected
self.delayed_reward = round(self.beta * self.delayed_reward, 4)
self.individual[0].fitness = (
self.ef + self.cf * 4 + self.debris_collected * 8)
# self.individual[0].fitness = self.delayed_reward \
# + self.exploration_fitness() + self.carrying_fitness() \
# + self.debris_collected

def get_debris_transported(self):
"""Return debris that have been cleared from hub."""
Expand Down Expand Up @@ -287,23 +297,25 @@ def step(self):
# Increase beta
# self.beta = self.timestamp / self.model.iter

# Maintain location history
self.location_history.add(self.location)

# Compute the behavior tree
self.bt.behaviour_tree.tick()

# Maintain location history
_, gridval = self.model.grid.find_grid(self.location)
self.location_history.add(gridval)

# Find the no.of debris collected from the BT execution
self.debris_collected = self.get_debris_transported()
# * self.get_food_in_hub(False)

# Computes overall fitness using Beta function
self.overall_fitness()

# Hash the phenotype with its fitness
# We need to move this from here to genetic step
cf = self.carrying_fitness()
ef = self.exploration_fitness()
self.cf = self.carrying_fitness()
self.ef = self.exploration_fitness()

# Computes overall fitness using Beta function
self.overall_fitness()
"""
if self.individual[0].phenotype in self.phenotypes.keys():
e, c, f = self.phenotypes[self.individual[0].phenotype]
if f < self.debris_collected:
Expand All @@ -324,32 +336,38 @@ def step(self):
self.phenotypes[self.individual[0].phenotype] = (
self.exploration_fitness(), self.carrying_fitness(),
self.debris_collected)
"""
self.phenotypes = dict()
self.phenotypes[self.individual[0].phenotype] = (
self.individual[0].fitness)

# Find the nearby agents
cellmates = self.model.grid.get_objects_from_grid(
type(self).__name__, self.location)

# If neighbours found, store the genome
if len(cellmates) > 1:
self.store_genome(cellmates)

# Logic for gentic operations.
# If the genome storage has enough genomes and agents has done some
# exploration then compute the genetic step OR
# 600 time step has passed and the agent has not done anything useful
# then also perform genetic step
storage_threshold = len(
self.genome_storage) >= (self.model.num_agents / 10)
if storage_threshold:
# New logic to invoke genetic step
if self.individual[0].fitness <= 0 and self.timestamp > 100:
individual = initialisation(self.parameter, 10)
individual = evaluate_fitness(individual, self.parameter)
self.genome_storage = self.genome_storage + individual
self.genetic_step()
elif (
(
storage_threshold is False and self.timestamp > 50
) and (self.exploration_fitness() < 10)):
individual = initialisation(self.parameter, 10)
individual = evaluate_fitness(individual, self.parameter)
self.genome_storage = individual
self.individual[0].fitness >= 0 and storage_threshold
) and self.timestamp > 200 and self.debris_collected <= 0):
self.genetic_step()

# If neighbours found, store the genome
if len(cellmates) > 1:
self.store_genome(cellmates)


class ExecutingAgent(NMAgent):
"""A nest maintanance swarm agent.
Expand Down
35 changes: 25 additions & 10 deletions examples/nest_maintainence_evolution/experiment.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,12 +13,13 @@
UI = False


def validation_loop(phenotypes, iteration, threshold=10.0):
def validation_loop(
phenotypes, iteration, parentname=None, ratio=1, threshold=10.0):
"""Validate the evolved behaviors."""
# Create a validation environment instance
# print('len of phenotype', len(set(phenotypes)))
valid = ValidationModel(
100, width, height, 10, iter=iteration)
100, width, height, 10, iter=iteration, parent=parentname, ratio=ratio)
# Build the environment
valid.build_environment_from_json()
# Create the agents in the environment from the sampled behaviors
Expand Down Expand Up @@ -95,7 +96,7 @@ def learning_phase(iteration, early_stop=False):
env.create_agents()
# Validation Step parameter
# Run the validation test every these many steps
validation_step = 2000
validation_step = 6000

# Iterate and execute each step in the environment
# Take a step i number of step in evolution environment
Expand All @@ -105,11 +106,15 @@ def learning_phase(iteration, early_stop=False):
# Take a step in evolution
env.step()
if (i + 1) % validation_step == 0:
phenotypes = env.behavior_sampling()
# save the phenotype to json file
phenotype_to_json(env.pname, env.runid + '-' + str(i), phenotypes)
validation_loop(phenotypes, validation_step)

try:
phenotypes = env.behavior_sampling_objects(ratio_value=0.1)
# save the phenotype to json file
phenotype_to_json(
env.pname, env.runid, phenotypes)
validation_loop(
phenotypes, 5000, phenotypes)
except ValueError:
pass
# Plot the fitness in the graph
graph = Graph(
env.pname, 'best.csv', [
Expand All @@ -126,7 +131,16 @@ def learning_phase(iteration, early_stop=False):
"""
# Update the experiment table
env.experiment.update_experiment()
return phenotypes

allphenotypes = env.behavior_sampling_objects(ratio_value=0.99)
# save the phenotype to json file
phenotype_to_json(
env.pname, env.runid + '-' + 'all', allphenotypes)
try:
# return list(phenotypes.keys())
return phenotypes
except UnboundLocalError:
return None


def phenotype_to_json(pname, runid, phenotypes):
Expand All @@ -141,7 +155,8 @@ def main(iter):
phenotypes = learning_phase(iter)
# learning_phase(iter)
# Run the evolved behaviors on a test environment
test_loop(phenotypes, 2000)
if phenotypes is not None:
test_loop(phenotypes, 2000)


def test_json_phenotype(json):
Expand Down
Loading

0 comments on commit 777e54d

Please sign in to comment.