Skip to content

Commit

Permalink
Merge pull request #60 from OObasuyi/v2.4-dev
Browse files Browse the repository at this point in the history
V2.4 dev
  • Loading branch information
OObasuyi authored Nov 23, 2022
2 parents edee55b + 5add0b4 commit ab7b166
Show file tree
Hide file tree
Showing 12 changed files with 342 additions and 70 deletions.
7 changes: 5 additions & 2 deletions README.MD
Original file line number Diff line number Diff line change
Expand Up @@ -55,10 +55,13 @@ augWork = FireStick(ippp_location='ippp_rel4.csv', access_policy='acp', ftd_host
augWork.policy_deployment_flow()
```
### CAW

```python
from fw_cleanup import FireBroom
sweeper = FireBroom(access_policy='test12', ftd_host='10.11.6.191', fmc_host='10.11.6.60', rule_prepend_name='test_st_beta_2', zone_of_last_resort='outside_zone')
sweeper.collapse_fmc_rules(comment='tester123')

sweeper = FireBroom(access_policy='test12', ftd_host='10.11.6.191', fmc_host='10.11.6.60',
rule_prepend_name='test_st_beta_2', zone_of_last_resort='outside_zone')
sweeper.collapse_fw_rules(comment='tester123')
```
### IC
```python
Expand Down
3 changes: 3 additions & 0 deletions examples/acl_modify_example.csv
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
source,destination,service,port_range_low,port_range_high,protocol,comments
1.1.1.1,8.8.8.8,yoyo,22,22,TCP,add
1.1.1.1,8.8.8.8,yoyo,1046,2056,UDP,remove
38 changes: 34 additions & 4 deletions examples/firewall_config_example.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -31,14 +31,44 @@ ruleset_type: ALLOW
# APPENDED COMMENT TO RULES
rule_comment: None


# check to see if the IPPP exists in the firewall's ruleset
ippp_checkup: false


#/ CLEAN UP SECTION
cleanup: true
# if there are rules that you want to collapse/reorganize for firewall scaffolding.
rule_cleanup: false
rule_cleanup: true
# if you want to change the name of host objects to their DNS or IP attributes.
object_cleanup: true
# change the name of the network objects
# resolve: whether you want IP or DNS as the new name value for individual objects
# group: change the whole name of the grouped objects defined by @rule_prepend_name
# DEFAULT: will convert legacy net_group to NetGroup format
clean_type: group
remove_unused: true
#\ CLEAN UP SECTION

# emergency switch in case something happened (network/power drop) in the rule cleanup module, since it involves rearranging the firewall configs.
# this switch will reinstall the ACP ruleset that were modified.
# recover old ACP file if the program crashed
recovery_mode: false
recovery_mode: true

#/ OBJECT GENERATION AND IDENTIFICATION
# if you want to use the ACTUAL PROTOCOL:PORT pair instead of the port defined name you can enable strict checking
strict_checkup: true
strict_checkup: true
# if you want objects in the firewall to have DNS resolved local hostnames instead of IPs as the object name, enable this
# you can also filter on what SUFFIX domain you just want the hostname for
resolve_objects: true
# This accepts regexs
dont_include_domains_suffix: '.com|.org|.net|.hole|.einet.einetworks.blankmarks.com|.ein.eil'
#\ OBJECT GENERATION AND IDENTIFICATION


#/ GENERAL INFORMATION FROM FIREWALLS
# this will save all or specific rules from the firewall or management device
save_rules: False
# if you need to save specific rules it will use the "rule_prepend_name" mentioned above
save_specific_rules: False
#\ GENERAL INFORMATION FROM FIREWALLS

74 changes: 56 additions & 18 deletions fw_cleanup.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
from datetime import datetime
from re import split, match
from re import split, match,sub

from fw_deploy import FireStick
from tqdm import tqdm
Expand All @@ -8,12 +8,13 @@
from os import replace
from fw_test import FireCheck


class FireBroom(FireStick):
def __init__(self, configuration_data: dict, cred_file=None):
    """Set up a FireBroom cleanup session on top of FireStick.

    Args:
        configuration_data: parsed firewall/config options; the IPPP location
            is forced to None because cleanup operations don't consume one.
        cred_file: optional credential file forwarded to FireStick.
    """
    # cleanup never needs an IPPP input file
    configuration_data['ippp_location'] = None
    super().__init__(configuration_data=configuration_data, cred_file=cred_file)
    # open the REST session immediately so cleanup calls can run
    self.rest_connection()
    # working directory for rule backup/recovery files
    self.temp_dir = 'temp_rules'
    # BUG FIX: original format was "%Y_%m_%d_%H%_M%_S" — "%_M"/"%_S" are
    # glibc-only padding extensions (invalid on other platforms) and the
    # underscore separators between time fields were lost. Use explicit
    # underscores between all fields.
    self.dt_now = datetime.now().replace(microsecond=0).strftime("%Y_%m_%d_%H_%M_%S")
    # file extension used for pickled rule-backup files
    self.save_ext = 'rulbk'

def is_prepend_naming_correct(self,name_of_obj):
# check if it's a exact name match of the group
Expand Down Expand Up @@ -44,7 +45,8 @@ def del_fmc_objects(self, type_, obj_type):
self.fmc_net_port_info()
if not isinstance(self.rule_prepend_name, str):
raise ValueError(f'self.rule_prepend_name value is not type str. you passed an {type(self.rule_prepend_name)} object')
self.utils.permission_check(f'Are you sure you want to delete {obj_type.upper()} ***{self.rule_prepend_name}*** {type_} objects?')
normalize_str = sub('[^A-Za-z0-9|\-|_]+',' ', str(obj_type).upper())
self.utils.permission_check(f'Are you sure you want to delete {normalize_str} ***{self.rule_prepend_name}*** {type_} objects?')
if type_ == 'network':
def net_delete():
del_list = [i[2] for i in self.net_data if self.is_prepend_naming_correct(i[0])] if self.rule_prepend_name != 'all' else [i[2] for i in self.net_data]
Expand Down Expand Up @@ -100,7 +102,14 @@ def del_port_group():

elif type_ == 'rule':
acp_id, acp_rules = self.retrieve_rule_objects()
del_list = [i['name'] for i in acp_rules if self.rule_prepend_name in i['name']] if self.rule_prepend_name != 'all' else acp_rules
if isinstance(obj_type,str):
# deleting via kwrd rule name or 'all'
del_list = [i['name'] for i in acp_rules if self.rule_prepend_name in i['name']] if obj_type != 'all' else acp_rules
else:
# deleting via passed list object
# make sure rules exist in ruleset
del_list = [i['name'] for i in acp_rules if i['name'] in obj_type]

for obj_id in tqdm(del_list, total=len(del_list), desc=f'deleting {self.rule_prepend_name} rules'):
try:
self.fmc.policy.accesspolicy.accessrule.delete(container_uuid=acp_id, name=obj_id)
Expand All @@ -109,14 +118,16 @@ def del_port_group():
else:
raise ValueError(f'type_ not found please select rule, port, or network. you passed {type_}')

def collapse_fmc_rules(self, comment: str = False, recover: bool = False):
temp_dir = 'temp_rules'
dt_now = datetime.now().replace(microsecond=0).strftime("%Y_%m_%d_%H%_M%_S")
save_ext = 'rulbk'
recovery_fname = f'{self.rule_prepend_name}_save_{dt_now}.{save_ext}'
recovery_loc = self.utils.create_file_path(temp_dir, recovery_fname)
if not isinstance(comment, str):
raise ValueError('COMMENT VALUE MUST BE PASSED')
@staticmethod
def backup_rules_op(acp_rules,recovery_loc):
rollback_acp = acp_rules.copy()
with open(recovery_loc, 'wb') as save_rule:
pickle.dump(rollback_acp, save_rule)

def prep_and_recover_fw_rules(self, recover: bool = False):
recovery_fname = f'{self.rule_prepend_name}_save_{self.dt_now}.{self.save_ext}'
recovery_loc = self.utils.create_file_path(self.temp_dir, recovery_fname)

acp_id, acp_rules = self.retrieve_rule_objects()
self.fmc_net_port_info()
if not recover:
Expand All @@ -129,22 +140,29 @@ def collapse_fmc_rules(self, comment: str = False, recover: bool = False):
# there should only one file in this dir from last run
if recover:
self.logfmc.warning('entering recovery mode')
recovery_loc = self.utils.get_files_from_dir(temp_dir, save_ext)[0]
recovery_loc = self.utils.get_files_from_dir(self.temp_dir, self.save_ext)[0]
with open(recovery_loc, 'rb') as save_rule:
rollback_acp = pickle.load(save_rule)
self.logfmc.debug(f'recovered {recovery_loc} file')
acp_rules = rollback_acp
# todo: need to let the user chose if they want to optimze the config are just insert the old config from the recover file
else:
# in case we fail our rule test or error happens while processing
rollback_acp = acp_rules.copy()
with open(recovery_loc, 'wb') as save_rule:
pickle.dump(rollback_acp, save_rule)
self.backup_rules_op(acp_rules,recovery_loc)

for col in acp_rules.columns:
acp_rules[col] = acp_rules[col].apply(lambda x: tuple(v for v in x) if isinstance(x, list) else x)
# fill in vals that are really any
# fill in vals that are really any
acp_rules.replace({None: 'any'}, inplace=True)

return acp_rules,acp_id,recovery_fname,recovery_loc

def collapse_fw_rules(self, comment: str = False, recover: bool = False):
if not isinstance(comment, str):
raise ValueError('COMMENT VALUE MUST BE PASSED')
# DRP the fw rules
acp_rules,acp_id,recovery_fname,recovery_loc = self.prep_and_recover_fw_rules(recover)

# collapse FW rules by zone
grouped_rules = acp_rules.groupby(['src_z', 'dst_z'])
gpl = grouped_rules.size()[grouped_rules.size() > 0].index.values.tolist()
Expand Down Expand Up @@ -297,6 +315,26 @@ def clean_object_store(self, clean_type):
else:
raise NotImplementedError('grouped cleaned issue')

def remove_non_hit_rules(self):
    """Delete firewall rules that show zero hits in an exported hitcount CSV.

    Reads the CSV named by the ``delete_unused_rules`` config key from
    ``archive/non_hit_rules``, collects the rule names whose "Hit Count"
    column is 0, and hands that list to del_fmc_objects for deletion.
    Logs an error and does nothing when the config key is unset.
    """
    hitcount_csv = self.config_data.get('delete_unused_rules')
    if not hitcount_csv:
        self.logfmc.error('NO HITCOUNT CSV TO ANALYZE')
        return

    # load the exported hitcount report
    report_path = self.utils.create_file_path('archive/non_hit_rules', hitcount_csv)
    hit_report = pd.read_csv(report_path)

    # keep only rules that were never hit
    zero_hit_mask = hit_report["Hit Count"] == 0
    unused_rule_names = hit_report['Rule Name'][zero_hit_mask].tolist()

    # hand the name list off for deletion
    self.del_fmc_objects(type_='rule', obj_type=unused_rule_names)




def rollback_acp_op(self, rollback_pd, acp_id, comment: str = False):
rollback_pd.rename(columns={'src_z': 'source_zone', 'dst_z': 'destination_zone', 'source': 'source_network', 'destination': 'destination_network'}, inplace=True)
rollback_pd.drop(columns=['policy_name'], inplace=True)
Expand Down
120 changes: 120 additions & 0 deletions fw_compliance.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
from multiprocessing import Pool, cpu_count

from fw_deploy import FireStick
from datetime import datetime
import pandas as pd
from re import search
from os import getpid


class FireComply(FireStick):
    """Compliance reporting layered on top of FireStick.

    Exports the firewall's current access-policy ruleset to CSV under
    ``compliance_rules/``, either raw or "prettified" (list-valued cells
    unraveled so each output row holds a single src/dst/port combination).
    """

    def __init__(self, configuration_data: dict, cred_file=None):
        # parent handles config/credential parsing; open the REST session now
        # so export calls can reach the management device immediately
        super().__init__(configuration_data=configuration_data, cred_file=cred_file)
        self.rest_connection()
        # base directory where exported rule reports are written
        self.comply_dir = 'compliance_rules'
        # run timestamp used to keep output file names unique
        self.dt_now = datetime.now().replace(microsecond=0).strftime("%Y%m%d_%H%M")

    def export_current_policy(self):
        """Save the current ruleset to a CSV report.

        Output location and name depend on config flags:
          * ``save_specific_rules``: keep only rules whose policy_name starts
            with ``rule_prepend_name`` and write under ``specific_rules/``;
            otherwise the whole policy goes under ``all_rules/``.
          * ``pretty_rules``: expand list-valued cells in parallel via
            rule_spool and write the result under a ``pretty/`` subfolder;
            otherwise the raw dataframe is written as-is.
        """
        self.logfmc.warning('Trying to Export rule(s) from Firewall')
        output_dir = f'{self.comply_dir}/specific_rules' if self.config_data.get('save_specific_rules') else f'{self.comply_dir}/all_rules'
        output_file = f'{self.rule_prepend_name}_{self.access_policy}_{self.dt_now}.csv' if self.config_data.get('save_specific_rules') else f'{self.access_policy}_{self.dt_now}.csv'

        # pull the live ruleset from the device
        current_ruleset = self.transform_rulesets(save_current_ruleset=True)
        if self.config_data.get('save_specific_rules'):
            current_ruleset = current_ruleset[current_ruleset['policy_name'].str.startswith(self.rule_prepend_name)]

        # prettify: expand list cells into one row per combination
        if self.config_data.get('pretty_rules'):
            current_ruleset.drop(columns=['src_z', 'dst_z', 'port', 'source', 'destination'], inplace=True)
            parsed_ruleset = []
            # use roughly half the logical cores minus one as the pool size
            # (avoids pulling in extra imports just to count physical cores)
            core_group = int((cpu_count() / 2)) - 1
            core_group = core_group if core_group > 0 else 1
            pool = Pool(core_group)

            # callback runs in the parent process: collect each worker's subset_df
            def rule_gatherer_callback(data):
                parsed_ruleset.append(data)
                return parsed_ruleset

            def log_func_error(error):
                self.logfmc.error(error)

            current_ruleset.reset_index(inplace=True, drop=True)
            # fan out one async task per rule row to break list elements into cells
            for cr_i in current_ruleset.index:
                pool.apply_async(self.rule_spool, args=(cr_i, current_ruleset,), callback=rule_gatherer_callback, error_callback=log_func_error)
            pool.close()
            pool.join()

            # combine the per-rule dataframes into one report
            parsed_ruleset = pd.concat(parsed_ruleset, ignore_index=True)

            # save the report to disk in CSV format under the pretty subfolder
            output_dir = f'{output_dir}/pretty'
            save_name = self.utils.create_file_path(output_dir, output_file)
            parsed_ruleset.to_csv(save_name, index=False)
            self.logfmc.warning(f'Current Rules saved to {save_name}')
            return

        # RAW output: write the untouched ruleset dataframe
        save_name = self.utils.create_file_path(output_dir, output_file)
        current_ruleset.to_csv(save_name, index=False)

    def rule_spool(self, idx, current_ruleset):
        """Expand one rule row into a dataframe of single-value rows.

        Runs in a worker process. List-valued cells (e.g. multiple sources)
        are unraveled so every surviving row holds exactly one value per
        column; ports are then split into protocol/low/high columns and a
        'port name' column is looked up from the firewall's port objects.
        Returns the expanded subset as a pandas DataFrame.
        """
        self.logfmc.debug(f'spawning new process for rule_spool on {getpid()}')
        rule_loc = current_ruleset.iloc[idx]
        collasped_rule = [rule_loc.to_dict()]
        # loop guard: one unravel pass per column is enough to guarantee
        # every nested list gets expanded before we stop
        iter_stop = rule_loc.shape[0]
        # open the rules up
        collsaped_collector = []
        while 0 < iter_stop:
            for rule_item in collasped_rule:
                for k, v in rule_item.items():
                    if isinstance(v, list):
                        for i in v:
                            # copy so expansion doesn't mutate the item being iterated
                            expanded_rule = rule_item.copy()
                            expanded_rule[k] = i
                            # don't add duplicates
                            if expanded_rule not in collsaped_collector:
                                collsaped_collector.append(expanded_rule)
                    # keep items not yet seen in the collector (skipped while
                    # the collector is still empty on the very first pass)
                    if rule_item not in collsaped_collector and len(collsaped_collector) != 0:
                        collsaped_collector.append(rule_item)

            # re-scan everything in case expansion surfaced more lists
            collasped_rule = collsaped_collector + collasped_rule
            iter_stop -= 1

        subset_df = pd.DataFrame(collasped_rule)
        # drop rows that still carry list values so only fully-expanded rows remain
        for col in subset_df.columns:
            subset_df[col] = subset_df[col][subset_df[col].apply(lambda x: not isinstance(x, list))]
        subset_df.dropna(inplace=True)

        # if we only need rules touching specific IPs (regex from config):
        # keep rows where exactly one side matches the pattern
        specific_ips = self.config_data.get('specific_src_dst')
        if specific_ips:
            src_spec = subset_df[subset_df['real_source'].apply(lambda x: bool(search(specific_ips, x))) & subset_df['real_destination'].apply(lambda x: not bool(search(specific_ips, x)))]
            dst_spec = subset_df[subset_df['real_destination'].apply(lambda x: bool(search(specific_ips, x))) & subset_df['real_source'].apply(lambda x: not bool(search(specific_ips, x)))]
            subset_df = pd.concat([src_spec, dst_spec], ignore_index=True)
            subset_df.dropna(inplace=True)

        # build a "col1:col2" lookup key so ports can be matched back to their
        # firewall-defined names (port_data columns appear to be
        # name/protocol/port-range — TODO confirm against fmc_net_port_info)
        port_data = pd.DataFrame(self.port_data)
        port_data['port_val'] = port_data[1].astype(str) + ":" + port_data[2].astype(str)

        # port name lookup against the firewall's port objects
        subset_df['port name'] = subset_df['real_port'].apply(lambda x: port_data[0][port_data['port_val'] == x].iloc[0])

        # split "proto:low-high" into separate columns and rename for the report
        subset_df['protocol'] = subset_df['real_port'].apply(lambda x: x.split(':')[0])
        subset_df['low port range'] = subset_df['real_port'].apply(lambda x: x.split(':')[1].split('-')[0] if x != 'any' else x)
        subset_df['high port range'] = subset_df['real_port'].apply(lambda x: x.split(':')[1].split('-')[-1] if x != 'any' else x)
        subset_df.drop(columns=['real_port'], inplace=True)
        subset_df.rename(columns={'real_source': 'source', 'real_destination': 'destination'}, inplace=True)
        return subset_df
Loading

0 comments on commit ab7b166

Please sign in to comment.