Skip to content

Commit

Permalink
Merge branch 'main' into issue208
Browse files Browse the repository at this point in the history
  • Loading branch information
schaffung authored May 6, 2021
2 parents 592dfae + 68eb731 commit 70eeb97
Show file tree
Hide file tree
Showing 16 changed files with 440 additions and 234 deletions.
21 changes: 20 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,12 +2,19 @@

Design Doc Link : [Gluster-test Design-doc](https://docs.google.com/document/d/1D8zUSmg-00ey711gsqvS6G9i_fGN2cE0EbG4u1TOsaQ/edit?usp=sharing)

### Structure:
# Contents
* [Structure](#structure)
* [Set up](#set-up)
* [About](#flags)

## Structure:

core: contains the core redant framework which includes parsing,test_list_builder,test_runner,runner_thread and redant_main.<br>
common: consists of the libs and ops that will help in running the test cases and the mixin class.<br>
tests: holds the test cases as performance and functional tests and includes parent test. Add any new test cases here.<br>

## Set up

### To start Working:

1. Clone redant repo.
Expand Down Expand Up @@ -55,3 +62,15 @@ For example,
One can also run the scripts given under the tools dir which will reduce the
lengthy commands to be typed out everytime. Check out the README.md at the link
[Tools-README](https://github.com/srijan-sivakumar/redant/blob/main/tools/README.md)

## About

### Flags

* -c, --config : Stores the path of the config file(s) to read. You need to provide the path else by default it is `None`. Moreover, this is a required argument so you need to provide it for sure.
* -t, --test-dir : The path of the test directory where test cases exist. You can also provide the path to the specific test file. But in that case remember the `-sp` flag :upside_down_face:. This is also a required argument so don't forget it.
* -l, --log-dir : It stores the path of the log directory where you want the log files to be kept. By default it stores `/tmp/redant` and it is not a required argument.
* -ll, --log-level : The log level you want for the execution. By default the log level is `I` (INFO). There are other log levels also like `D`(DEBUG), `W`(WARN) etc.
* -cc, --concurrency-count : It stores the number of concurrent tests run. By default it is 4.
* -rf, --result-file : It stores the path of the result file. By default it is `None`.
* -xls, --excel-sheet : It stores the path of the excel sheet. By default it is `None`.
4 changes: 0 additions & 4 deletions common/ops/abstract_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,8 +36,6 @@ def execute_abstract_op_node(self, cmd : str, node : str=None):
self.logger.error(ret['msg']['opErrstr'])
raise Exception(ret['msg']['opErrstr'])

self.logger.info(f"Successfully ran {cmd} on {node}")

return ret

def execute_abstract_op_multinode(self, cmd : str, node : str=None):
Expand All @@ -59,8 +57,6 @@ def execute_abstract_op_multinode(self, cmd : str, node : str=None):
self.logger.error(ret['msg']['opErrstr'])
raise Exception(ret['msg']['opErrstr'])

self.logger.info(f"Successfully ran {cmd} on {node}")

return ret


Expand Down
72 changes: 72 additions & 0 deletions common/ops/support_ops/io_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,3 +33,75 @@ def execute_io_cmd(self, cmd: str, host: str = None):
ret = self.execute_abstract_op_node(cmd, host)

return ret

def create_file(self, path: str, filename: str, node: str):
    """
    Create a file named ``filename`` under ``path`` on the given node.

    Args:
        path (str): Absolute directory in which the file is created.
        filename (str): Name of the file to create.
        node (str): IP of the node on which the command runs.

    Raises:
        Exception: Propagated from execute_abstract_op_node when the
            remote ``touch`` command fails.
    """
    # Bug fix: the command previously ignored the ``filename`` argument;
    # interpolate it so the requested file is actually created.
    cmd = f"touch {path}/{filename}"
    self.execute_abstract_op_node(cmd, node)

def create_dir(self, path: str, dirname: str, node: str):
    """
    Create directory ``dirname`` under ``path`` on the given node.

    ``mkdir -p`` is used, so missing parent directories are created
    and an already-existing directory is not an error.
    """
    self.execute_abstract_op_node(f"mkdir -p {path}/{dirname}", node)

def create_dirs(self, list_of_nodes: list, list_of_dir_paths: list):
    """
    Create directories on nodes.

    Args:
        list_of_nodes (list): Nodes on which dirs have to be created.
            A single node may also be passed as a plain string.
        list_of_dir_paths (list): List of dirs' absolute paths. A single
            space-separated string is accepted as well.

    Returns:
        bool: True if creation of all dirs on all nodes is successful.
        False otherwise.
    """
    if not isinstance(list_of_nodes, list):
        list_of_nodes = [list_of_nodes]

    # ``mkdir -p`` accepts several paths in one invocation, so collapse
    # the list into a single space-separated argument string.
    if isinstance(list_of_dir_paths, list):
        list_of_dir_paths = ' '.join(list_of_dir_paths)

    # Create upload dir on each node
    cmd = f"mkdir -p {list_of_dir_paths}"
    _rc = True

    ret = self.execute_command_multinode(cmd, list_of_nodes)
    for each_ret in ret:
        if each_ret['error_code'] != 0:
            self.logger.error(
                f"Failed to create the dirs: "
                f"{list_of_dir_paths.split(' ')} on node: "
                f"{each_ret['node']} - {each_ret['error_msg']}")
            _rc = False

    return _rc


def path_exists(self, list_of_nodes, list_of_paths):
    """Check whether every given path exists on every given node.

    Args:
        list_of_nodes (list): List of nodes (a single node may be
            passed as a plain string).
        list_of_paths (list): List of abs paths to verify (a single
            space-separated string is accepted as well).

    Returns:
        bool: True if all paths exist on all nodes. False otherwise.
    """
    nodes = list_of_nodes if isinstance(list_of_nodes, list) \
        else [list_of_nodes]
    paths = list_of_paths if isinstance(list_of_paths, list) \
        else list_of_paths.split(" ")

    all_present = True
    for a_path in paths:
        # ``ls -l`` exits non-zero when the path is absent.
        results = self.execute_command_multinode(f"ls -l {a_path}", nodes)
        for result in results:
            if result['error_code'] != 0:
                message = result['error_msg'].rstrip('\n')
                self.logger.error(f"{message} on node {result['node']}")
                all_present = False

    return all_present
141 changes: 131 additions & 10 deletions common/rexe.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
import os
import time
import random
import concurrent.futures
import paramiko
Expand Down Expand Up @@ -28,18 +30,21 @@ def establish_connection(self):
self.connect_flag = True

for node in self.host_dict:

node_ssh_client = paramiko.SSHClient()
node_ssh_client.set_missing_host_key_policy(
paramiko.AutoAddPolicy())
node_ssh_client.load_host_keys(
os.path.expanduser('/root/.ssh/known_hosts'))
mykey = paramiko.RSAKey.from_private_key_file('/root/.ssh/id_rsa')
try:
node_ssh_client.connect(
hostname=node,
username=self.host_dict[node]['user'],
password=self.host_dict[node]['passwd'])
self.logger.debug(f"SSH connection to {node} is successful.")
pkey=mykey,
)

except Exception as e:
self.logger.error(f"Connection failure. Exception : {e}")
self.connect_flag = False
raise e
self.node_dict[node] = node_ssh_client

def deconstruct_connection(self):
Expand Down Expand Up @@ -93,15 +98,14 @@ def execute_command(self, cmd, node):
except Exception as e:
# Reconnection to be done.
node_ssh_client = paramiko.SSHClient()
node_ssh_client.set_missing_host_key_policy(
paramiko.AutoAddPolicy())
node_ssh_client.load_host_keys(
os.path.expanduser('/root/.ssh/known_hosts'))
mykey = paramiko.RSAKey.from_private_key_file('/root/.ssh/id_rsa')
try:
node_ssh_client.connect(
hostname=node,
username=self.host_dict[node]['user'],
password=self.host_dict[node]['passwd'],
pkey=mykey,
)
self.logger.debug(f"SSH connection to {node} is successful.")
self.node_dict[node] = node_ssh_client
except Exception as e:
self.logger.error(f"Connection failure. Exceptions {e}.")
Expand Down Expand Up @@ -129,6 +133,123 @@ def execute_command(self, cmd, node):
self.logger.debug(ret_dict)
return ret_dict

@dispatch(str)
def execute_command_async(self, cmd: str) -> dict:
    """
    Run a command asynchronously on a randomly chosen node.

    Args:
        cmd (str): Command to be executed.

    Returns:
        ret: A dictionary consisting
            - cmd : Command requested
            - node : Node wherein it was run
            - stdout : The stdout handle
            - stderr : The stderr handle
    """
    chosen_node = self._random_node()
    return self.execute_command_async(cmd, chosen_node)

@dispatch(str, str)
def execute_command_async(self, cmd: str, node: str) -> dict:
    """
    Function to execute command asynchronously in the given node.
    Args:
        cmd (string): Command to be executed.
        node (string) : The node ip wherein the command is to be run.
    Returns:
        ret: A dictionary consisting
            - cmd : Command requested
            - node : Node wherein the command was run
            - stdout : The stdout handle
            - stderr : The stderr handle
    """
    async_obj = {}

    # Without established connections there is nothing to run against;
    # the caller gets an empty dict back.
    if not self.connect_flag:
        return async_obj
    try:
        # exec_command returns immediately; stdout/stderr are live
        # channel handles that the caller polls/collects later.
        _, stdout, stderr = self.node_dict[node].exec_command(cmd)
        async_obj = {"cmd": cmd, "node": node, "stdout": stdout,
                     "stderr": stderr}
    except Exception as e:
        # Reconnection to be done.
        node_ssh_client = paramiko.SSHClient()
        node_ssh_client.load_host_keys(
            os.path.expanduser('/root/.ssh/known_hosts'))
        mykey = paramiko.RSAKey.from_private_key_file('/root/.ssh/id_rsa')
        try:
            node_ssh_client.connect(
                hostname=node,
                pkey=mykey,
            )
            self.node_dict[node] = node_ssh_client
        except Exception as e:
            # NOTE(review): this inner ``e`` shadows the outer exception,
            # and execution falls through to the retry below even when
            # reconnection failed — confirm whether this should re-raise
            # instead of retrying on a possibly stale client.
            self.logger.error(f"Connection failure. Exceptions {e}.")
        # On rebooting the node
        _, stdout, stderr = self.node_dict[node].exec_command(cmd)

        async_obj = {"cmd": cmd, "node": node, "stdout": stdout,
                     "stderr": stderr}
    return async_obj

def check_async_command_status(self, async_obj: dict) -> bool:
    """
    Tell whether a previously dispatched async command has finished.

    Args:
        async_obj (dict) : Contains the details about the async command,
            with keys -> 'stdout', 'stderr', 'cmd', 'node'
    Returns:
        Bool : True once the remote command has exited, else False.
    """
    channel = async_obj["stdout"].channel
    return channel.exit_status_ready()

def collect_async_result(self, async_obj: dict) -> dict:
    """
    Collect the async command's execution result after it ends.

    Args:
        async_obj (dict) : Contains the details about the async command,
            with keys -> 'stdout', 'stderr', 'cmd', 'node'
    Returns:
        dict: Result dictionary with keys 'Flag', 'msg', 'node', 'cmd',
        'error_code' and, on failure, 'error_msg'.
    """
    stdout = async_obj['stdout']
    ret_dict = {}

    if stdout.channel.recv_exit_status() != 0:
        # Non-zero exit: keep the raw stdout lines and flatten stderr
        # into a single string.
        ret_dict['Flag'] = False
        ret_dict['msg'] = stdout.readlines()
        error_msg = async_obj['stderr'].readlines()
        if isinstance(error_msg, list):
            error_msg = "".join(error_msg)
        ret_dict['error_msg'] = error_msg
    else:
        if "--xml" in async_obj['cmd']:
            # Gluster CLI xml output is converted to a plain dict.
            xml_text = "".join(stdout.readlines())
            parsed = xmltodict.parse(xml_text)
            ret_dict['msg'] = json.loads(json.dumps(parsed))['cliOutput']
        else:
            ret_dict['msg'] = stdout.readlines()
        ret_dict['Flag'] = True

    ret_dict['node'] = async_obj['node']
    ret_dict['cmd'] = async_obj['cmd']
    ret_dict['error_code'] = stdout.channel.recv_exit_status()

    self.logger.debug(ret_dict)
    return ret_dict

def wait_till_async_command_ends(self, async_obj: dict) -> dict:
    """
    Block until the async command finishes, then return its result.

    Args:
        async_obj (dict) : Contains the details about the async command,
            with keys -> 'stdout', 'stderr', 'cmd', 'node'
    Returns:
        dict: Returns the resultant dictionary after the command ends.
    """
    # Poll once a second until the remote command has exited.  The
    # result is collected only after exit, fixing the original bug where
    # ``ret_dict`` stayed unbound (UnboundLocalError) if the command had
    # already finished before the first loop iteration, or if the loop
    # exited without ``recv_ready()`` ever being true.
    while not async_obj['stdout'].channel.exit_status_ready():
        time.sleep(1)

    return self.collect_async_result(async_obj)

@dispatch(str)
def execute_command_multinode(self, cmd):
"""
Expand Down
27 changes: 8 additions & 19 deletions config/CONFIG_README.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,39 +6,28 @@ The components of the config.yml file are as follows:

<h3>1. servers_info</h3>
'servers_info' is info about each server in the cluster.<br>
Each server is defined by its ip address.
Each server should contain 4 attributes:<br>
1) ip(key representing server): ip address of the server.<br>
2) brick_root: the list of directories where bricks have to be created.<br>
3) user: the username of the server for ssh connection.<br>
4) passwd: the password of the server for ssh connection.<br>
All the above attributes have to defined by the user.<br>
Each server is defined by its ip address which acts as a key representing the server.<br>
Each server should contain 1 attribute:<br>
1) brick_root: the list of directories where bricks have to be created.<br>
The above attribute has to be defined by the user.<br>
If a new server has to be added, then it has to follow the convention of the
previous servers
previous servers.

Example format of one server:<br>

ip:<br>
&nbsp;&nbsp;&nbsp;&nbsp; brick_root: ["/bricks","/gluster"]<br>
&nbsp;&nbsp;&nbsp;&nbsp; user: "root"<br>
&nbsp;&nbsp;&nbsp;&nbsp; passwd: "redhat"<br>

<h3>2. clients_info</h3>
'clients_info' is info about each client in the cluster.<br>
Each client is defined by its ip address.
Each client should contain 3 attributes:<br>
1) ip: ip address of the client.<br>
2) user: the username of the client for ssh connection.<br>
3) passwd: the password of the client for ssh connection.<br>
All the above attributes have to defined by the user.<br>
Each client is defined by its ip address which acts as a key representing the client.<br>
The client does not take any attribute values.<br>
If a new client has to be added, then it has to follow the convention of the
previous clients
previous clients.

Example format of one client:<br>

ip:<br>
&nbsp;&nbsp;&nbsp;&nbsp; user: "root"<br>
&nbsp;&nbsp;&nbsp;&nbsp; passwd: "redhat"<br>

<h3>3. volume_types</h3>
'volume_types' defines different volume types that we can create in
Expand Down
12 changes: 0 additions & 12 deletions config/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,29 +6,17 @@
servers_info:
"1.1.1.1":
brick_root: ["/bricks"]
user: "root"
passwd: "redhat"
"2.2.2.2":
brick_root: ["/bricks"]
user: "root"
passwd: "redhat"
"3.3.3.3":
brick_root: ["/bricks"]
user: "root"
passwd: "redhat"
"4.4.4.4":
brick_root: ["/bricks"]
user: "root"
passwd: "redhat"

#clients_info - All the relevant information about the clients
clients_info:
"5.5.5.5":
user: "root"
passwd: "redhat"
"6.6.6.6":
user: 'root'
passwd: "redhat"

#volume_types - Indivudual volume type information and minimum servers for
# each volume type
Expand Down
Loading

0 comments on commit 70eeb97

Please sign in to comment.