From e8dde1b3f023331d80b63511109b7e88a7ffaff5 Mon Sep 17 00:00:00 2001
From: Piotr Roslaniec
Date: Wed, 29 Nov 2023 14:58:27 +0100
Subject: [PATCH 1/4] chore(ruff): configure ruff
---
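This patch adds a ruff lint step to the Python CI workflow and reformats the
package in a black-compatible style (double-quoted strings, sorted imports,
trailing commas at wrapped call sites). The CI step can be reproduced locally
with the same commands the workflow runs: `pip install ruff`, then
`ruff check nucypher_ops`.

The ruff settings live in the new pyproject.toml; its seven lines appear later
in this patch and are not reproduced here. As a rough, hypothetical sketch
only, a minimal section of that kind could look like:

    [tool.ruff]
    # illustrative values only; the committed pyproject.toml may differ
    line-length = 88
    target-version = "py38"
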
.github/workflows/python.yml | 10 +-
nucypher_ops/__about__.py | 4 +-
nucypher_ops/cli/ethereum.py | 82 +-
nucypher_ops/cli/main.py | 4 +-
nucypher_ops/cli/namespaces.py | 33 +-
nucypher_ops/cli/nodes.py | 392 +++++---
nucypher_ops/cli/porter.py | 82 +-
nucypher_ops/cli/recover_utils.py | 69 +-
nucypher_ops/cli/tbtcv2.py | 572 +++++++----
nucypher_ops/cli/ursula.py | 724 ++++++++++----
nucypher_ops/constants.py | 26 +-
nucypher_ops/ops/ansible_utils.py | 86 +-
nucypher_ops/ops/contracts.py | 12 +-
nucypher_ops/ops/fleet_ops.py | 1487 ++++++++++++++++-------------
nucypher_ops/ops/keygen.py | 18 +-
pyproject.toml | 7 +
setup.py | 40 +-
17 files changed, 2347 insertions(+), 1301 deletions(-)
create mode 100644 pyproject.toml
diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml
index 1c7cc8b..226bf31 100644
--- a/.github/workflows/python.yml
+++ b/.github/workflows/python.yml
@@ -16,10 +16,14 @@ jobs:
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
+
- name: Install dependencies
run: |
python -m pip install --upgrade pip
- pip install setuptools wheel
+ pip install setuptools wheel ruff
+
+ - name: Run ruff
+ run: ruff check nucypher_ops
+
- name: Build dist
- run: |
- python setup.py sdist bdist_wheel
+ run: python setup.py sdist bdist_wheel
diff --git a/nucypher_ops/__about__.py b/nucypher_ops/__about__.py
index e2ff577..b80fac7 100644
--- a/nucypher_ops/__about__.py
+++ b/nucypher_ops/__about__.py
@@ -2,7 +2,7 @@
__url__ = "https://github.com/nucypher/nucypher-ops"
-__summary__ = 'Install and management tools for a proxy re-encryption network to empower privacy in decentralized systems.'
+__summary__ = "Install and management tools for a proxy re-encryption network to empower privacy in decentralized systems."
__version__ = "0.12.0"
@@ -12,4 +12,4 @@
__license__ = "GNU Affero General Public License, Version 3"
-__copyright__ = 'Copyright (C) 2022 NuCypher'
+__copyright__ = "Copyright (C) 2022 NuCypher"
diff --git a/nucypher_ops/cli/ethereum.py b/nucypher_ops/cli/ethereum.py
index 16b9d78..3a1988e 100644
--- a/nucypher_ops/cli/ethereum.py
+++ b/nucypher_ops/cli/ethereum.py
@@ -1,38 +1,78 @@
-from nucypher_ops.constants import DEFAULT_NAMESPACE, DEFAULT_NETWORK
-from nucypher_ops.ops.fleet_ops import CloudDeployers
import os
+
import click
+
+from nucypher_ops.constants import DEFAULT_NAMESPACE, DEFAULT_NETWORK
+from nucypher_ops.ops.fleet_ops import CloudDeployers
+
emitter = click
-@click.group('ethereum')
+@click.group("ethereum")
def cli():
"""deploy and update geth nodes"""
-@cli.command('deploy')
-@click.option('--image', help="The geth image to deploy", default='ethereum/client-go:stable')
-@click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING, default=DEFAULT_NETWORK)
-@click.option('--include-host', 'include_hosts', help="specify hosts to update", multiple=True, type=click.STRING)
-@click.option('--env', '-e', 'envvars', help="additional environment variables (ENVVAR=VALUE)", multiple=True, type=click.STRING, default=[])
-@click.option('--cli', '-c', 'cliargs', help="additional cli arguments for geth", multiple=True, type=click.STRING, default=[])
+@cli.command("deploy")
+@click.option(
+ "--image", help="The geth image to deploy", default="ethereum/client-go:stable"
+)
+@click.option(
+ "--namespace",
+ help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The Nucypher network name these hosts will run on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--include-host",
+ "include_hosts",
+ help="specify hosts to update",
+ multiple=True,
+ type=click.STRING,
+)
+@click.option(
+ "--env",
+ "-e",
+ "envvars",
+ help="additional environment variables (ENVVAR=VALUE)",
+ multiple=True,
+ type=click.STRING,
+ default=[],
+)
+@click.option(
+ "--cli",
+ "-c",
+ "cliargs",
+ help="additional cli arguments for geth",
+ multiple=True,
+ type=click.STRING,
+ default=[],
+)
def deploy(image, namespace, network, include_hosts, envvars, cliargs):
"""Deploys NuCypher on managed hosts."""
- deployer = CloudDeployers.get_deployer('ethereum')(emitter,
- docker_image=image,
- namespace=namespace,
- network=network,
- envvars=envvars,
- cliargs=cliargs,
- resource_name='ethereum'
- )
+ deployer = CloudDeployers.get_deployer("ethereum")(
+ emitter,
+ docker_image=image,
+ namespace=namespace,
+ network=network,
+ envvars=envvars,
+ cliargs=cliargs,
+ resource_name="ethereum",
+ )
- hostnames = deployer.config['instances'].keys()
+ hostnames = deployer.config["instances"].keys()
if include_hosts:
hostnames = include_hosts
- for name, hostdata in [(n, d) for n, d in deployer.config['instances'].items() if n in hostnames]:
+ for name, hostdata in [
+ (n, d) for n, d in deployer.config["instances"].items() if n in hostnames
+ ]:
emitter.echo(f'\t{name}: {hostdata["publicaddress"]}', color="yellow")
- os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
+ os.environ["ANSIBLE_HOST_KEY_CHECKING"] = "False"
deployer.deploy(hostnames)
diff --git a/nucypher_ops/cli/main.py b/nucypher_ops/cli/main.py
index 3b83548..8fdbfd2 100644
--- a/nucypher_ops/cli/main.py
+++ b/nucypher_ops/cli/main.py
@@ -1,11 +1,11 @@
import click
-from nucypher_ops.cli.nodes import cli as nodes
-from nucypher_ops.cli.ursula import cli as ursula
from nucypher_ops.cli.ethereum import cli as ethereum
from nucypher_ops.cli.namespaces import cli as namespaces
+from nucypher_ops.cli.nodes import cli as nodes
from nucypher_ops.cli.porter import cli as porter
from nucypher_ops.cli.tbtcv2 import cli as tbtcv2
+from nucypher_ops.cli.ursula import cli as ursula
from importlib.metadata import version
diff --git a/nucypher_ops/cli/namespaces.py b/nucypher_ops/cli/namespaces.py
index e0ab0f4..cdeed95 100644
--- a/nucypher_ops/cli/namespaces.py
+++ b/nucypher_ops/cli/namespaces.py
@@ -1,17 +1,26 @@
-from nucypher_ops.constants import DEFAULT_NAMESPACE, DEFAULT_NETWORK, NETWORKS
-from nucypher_ops.ops.fleet_ops import CloudDeployers
-import os
import click
+
+from nucypher_ops.constants import NETWORKS
+from nucypher_ops.ops.fleet_ops import CloudDeployers
+
emitter = click
-@click.group('namespaces')
+@click.group("namespaces")
def cli():
"""Organize the machinery"""
-@cli.command('list')
-@click.option('--all', help="list all namespaces under all networks", default=False, is_flag=True)
-@click.option('--network', help="The network whose namespaces you want to see.", type=click.Choice(NETWORKS.keys()), default='mainnet')
+
+@cli.command("list")
+@click.option(
+ "--all", help="list all namespaces under all networks", default=False, is_flag=True
+)
+@click.option(
+ "--network",
+ help="The network whose namespaces you want to see.",
+ type=click.Choice(NETWORKS.keys()),
+ default="mainnet",
+)
def list_namespaces(network, all):
"""lists namespaces"""
if all:
@@ -19,12 +28,14 @@ def list_namespaces(network, all):
else:
networks = [network]
deployers = [
- CloudDeployers.get_deployer('generic')(emitter, network=network, pre_config={"namespace": None})
- for network in networks]
+ CloudDeployers.get_deployer("generic")(
+ emitter, network=network, pre_config={"namespace": None}
+ )
+ for network in networks
+ ]
for deployer in deployers:
namespaces = deployer.get_namespace_names()
if namespaces:
emitter.echo(deployer.network)
for ns in namespaces:
- emitter.echo(f'\t{ns}')
-
\ No newline at end of file
+ emitter.echo(f"\t{ns}")
diff --git a/nucypher_ops/cli/nodes.py b/nucypher_ops/cli/nodes.py
index 58b8121..ad93d89 100644
--- a/nucypher_ops/cli/nodes.py
+++ b/nucypher_ops/cli/nodes.py
@@ -1,3 +1,4 @@
+import importlib
import json
import click
@@ -9,103 +10,201 @@
emitter = click
-@click.group('nodes')
+@click.group("nodes")
def cli():
"""Manage the machinery"""
-@cli.command('create')
-@click.option('--region', help="provider specific region name (like us-east-1 or SFO3", default=None)
-@click.option('--instance-type', help="provider specific instance size like `s-1vcpu-2gb` or `t3.small`", default=None)
-@click.option('--cloudprovider', help="aws or digitalocean", default=None)
-@click.option('--count', help="Create this many nodes.", type=click.INT, default=1)
-@click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--nickname', help="A nickname by which to remember the created hosts", type=click.STRING, required=False)
-@click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING, default=DEFAULT_NETWORK)
+@cli.command("create")
+@click.option(
+ "--region",
+ help="provider specific region name (like us-east-1 or SFO3",
+ default=None,
+)
+@click.option(
+ "--instance-type",
+ help="provider specific instance size like `s-1vcpu-2gb` or `t3.small`",
+ default=None,
+)
+@click.option("--cloudprovider", help="aws or digitalocean", default=None)
+@click.option("--count", help="Create this many nodes.", type=click.INT, default=1)
+@click.option(
+ "--namespace",
+ help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--nickname",
+ help="A nickname by which to remember the created hosts",
+ type=click.STRING,
+ required=False,
+)
+@click.option(
+ "--network",
+ help="The Nucypher network name these hosts will run on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
def create(region, instance_type, cloudprovider, count, nickname, namespace, network):
"""Creates the required number of workers to be staked later under a namespace"""
- available_providers = ['aws', 'digitalocean']
- choice_list = '\n\t'.join(available_providers)
+ available_providers = ["aws", "digitalocean"]
+ choice_list = "\n\t".join(available_providers)
if not cloudprovider:
cloudprovider = emitter.prompt(
f"Please choose a Cloud Service Provider from these options: \n\t{choice_list}\n",
type=emitter.Choice(available_providers),
- show_choices=False
+ show_choices=False,
)
- if cloudprovider == 'aws':
- try:
- import boto3
- except ImportError:
+ if cloudprovider == "aws":
+ spec = importlib.util.find_spec("boto3")
+ if spec is None:
raise click.BadOptionUsage(
- 'cloudprovider', "You must have boto3 installed to create aws nodes. run `pip install boto3` or use `--cloudprovider digitalocean`")
+ "cloudprovider",
+ "You must have boto3 installed to create aws nodes. run `pip install boto3` or use `--cloudprovider digitalocean`",
+ )
- deployer = CloudDeployers.get_deployer(cloudprovider)(emitter,
- namespace=namespace, network=network, instance_type=instance_type, action='create', region=region)
+ deployer = CloudDeployers.get_deployer(cloudprovider)(
+ emitter,
+ namespace=namespace,
+ network=network,
+ instance_type=instance_type,
+ action="create",
+ region=region,
+ )
names = []
i = 1
while len(names) < count:
- name = (nickname or f'{namespace}-{network}') + f'-{i}'
- if name not in deployer.config.get('instances', {}):
+ name = (nickname or f"{namespace}-{network}") + f"-{i}"
+ if name not in deployer.config.get("instances", {}):
names.append(name)
i += 1
deployer.create_nodes(names)
emitter.echo(
- f"done. created {count} nodes. list existing nodes with `nucypher-ops nodes list`")
-
-
-@cli.command('add')
-@click.option('--host-address', help="The IP address or Hostname of the host you are adding.", required=True)
-@click.option('--login-name', help="The name username of a user with root privileges we can ssh as on the host.", required=True)
-@click.option('--key-path', help="The path to a keypair we will need to ssh into this host (default: ~/.ssh/id_rsa)", default="~/.ssh/id_rsa")
-@click.option('--ssh-port', help="The port this host's ssh daemon is listening on (default: 22)", default=22)
-@click.option('--nickname', help="A nickname to remember this host by", type=click.STRING, required=True)
-@click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default='nucypher')
-@click.option('--network', help="The Nucypher network name these hosts will run on. (default mainnet)", type=click.STRING, default=DEFAULT_NETWORK)
+ f"done. created {count} nodes. list existing nodes with `nucypher-ops nodes list`"
+ )
+
+
+@cli.command("add")
+@click.option(
+ "--host-address",
+ help="The IP address or Hostname of the host you are adding.",
+ required=True,
+)
+@click.option(
+ "--login-name",
+ help="The name username of a user with root privileges we can ssh as on the host.",
+ required=True,
+)
+@click.option(
+ "--key-path",
+ help="The path to a keypair we will need to ssh into this host (default: ~/.ssh/id_rsa)",
+ default="~/.ssh/id_rsa",
+)
+@click.option(
+ "--ssh-port",
+ help="The port this host's ssh daemon is listening on (default: 22)",
+ default=22,
+)
+@click.option(
+ "--nickname",
+ help="A nickname to remember this host by",
+ type=click.STRING,
+ required=True,
+)
+@click.option(
+ "--namespace",
+ help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default="nucypher",
+)
+@click.option(
+ "--network",
+ help="The Nucypher network name these hosts will run on. (default mainnet)",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
def add(host_address, login_name, key_path, ssh_port, nickname, namespace, network):
"""Adds an existing node to the local config for future management."""
name = nickname
- deployer = CloudDeployers.get_deployer('generic')(
- emitter, namespace=namespace, network=network, action='add')
+ deployer = CloudDeployers.get_deployer("generic")(
+ emitter, namespace=namespace, network=network, action="add"
+ )
deployer.create_nodes([name], host_address, login_name, key_path, ssh_port)
-@cli.command('copy')
-@click.option('--to-namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, required=True)
-@click.option('--to-network', help="The Nucypher network name these hosts will run on. (default mainnet)", type=click.STRING, required=True)
-@click.option('--from', 'from_path', help="The 'path' of a node in /network/namespace/name format ie. '/mainnet/aws-us-east-nodes/aws-1'", type=click.STRING, required=True)
+@cli.command("copy")
+@click.option(
+ "--to-namespace",
+ help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ required=True,
+)
+@click.option(
+ "--to-network",
+ help="The Nucypher network name these hosts will run on. (default mainnet)",
+ type=click.STRING,
+ required=True,
+)
+@click.option(
+ "--from",
+ "from_path",
+ help="The 'path' of a node in /network/namespace/name format ie. '/mainnet/aws-us-east-nodes/aws-1'",
+ type=click.STRING,
+ required=True,
+)
def copy(from_path, to_network, to_namespace):
"""
- Copy a node from one namespace to another
-
- ie. nucypher-ops nodes copy --to-namespace allmynodes --from mainnet/eu-central-1/europe-nodes-1
+ Copy a node from one namespace to another
+
+    e.g. nucypher-ops nodes copy --to-namespace allmynodes --from mainnet/eu-central-1/europe-nodes-1
"""
try:
- network, namespace, host_name = from_path.lstrip('/').rstrip('/').split('/')
- except Exception as e:
- emitter.echo("please supply --from in the format of /network/namespace/node_nickname")
+ network, namespace, host_name = from_path.lstrip("/").rstrip("/").split("/")
+ except Exception:
+ emitter.echo(
+ "please supply --from in the format of /network/namespace/node_nickname"
+ )
return
-
- source = CloudDeployers.get_deployer('generic')(emitter, namespace=namespace, network=network)
+ source = CloudDeployers.get_deployer("generic")(
+ emitter, namespace=namespace, network=network
+ )
host_data = source.get_host_by_name(host_name)
-
- deployer = CloudDeployers.get_deployer('generic')(
- emitter, namespace=to_namespace, network=to_network, action='copy')
+
+ deployer = CloudDeployers.get_deployer("generic")(
+ emitter, namespace=to_namespace, network=to_network, action="copy"
+ )
deployer.add_already_configured_node(host_data)
-@cli.command('list')
-@click.option('--pretty', help="Human readable output", default=False, is_flag=True)
-@click.option('--json', 'as_json', help="output json", default=False, is_flag=True)
-@click.option('--all', help="list all nodes under all networks and namespaces", default=False, is_flag=True)
-@click.option('--network', help="The network whose hosts you want to see.", type=click.STRING, default=DEFAULT_NETWORK)
-@click.option('--namespace', help="The network whose hosts you want to see.", type=click.STRING, default=DEFAULT_NAMESPACE)
+@cli.command("list")
+@click.option("--pretty", help="Human readable output", default=False, is_flag=True)
+@click.option("--json", "as_json", help="output json", default=False, is_flag=True)
+@click.option(
+ "--all",
+ help="list all nodes under all networks and namespaces",
+ default=False,
+ is_flag=True,
+)
+@click.option(
+ "--network",
+ help="The network whose hosts you want to see.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--namespace",
+ help="The network whose hosts you want to see.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
def list(network, namespace, all, as_json, pretty):
"""Prints local config info about known hosts"""
@@ -116,54 +215,82 @@ def list(network, namespace, all, as_json, pretty):
networks = [network]
deployers = [
- CloudDeployers.get_deployer('generic')(emitter, network=network, pre_config={"namespace": namespace}, read_only=True)
- for network in networks]
+ CloudDeployers.get_deployer("generic")(
+ emitter,
+ network=network,
+ pre_config={"namespace": namespace},
+ read_only=True,
+ )
+ for network in networks
+ ]
human_data = []
headers = [
- 'host_nickname',
- 'publicaddress',
+ "host_nickname",
+ "publicaddress",
# 'rest url',
- 'operator address',
- 'provider',
- 'docker_image',
+ "operator address",
+ "provider",
+ "docker_image",
]
for deployer in deployers:
if not as_json:
- emitter.echo(f'{deployer.network}')
+ emitter.echo(f"{deployer.network}")
for ns, hosts in deployer.get_namespace_data(namespace=namespace):
if not as_json and hosts:
- emitter.echo(f'\t{ns}')
+ emitter.echo(f"\t{ns}")
for name, data in hosts:
if not as_json:
- emitter.echo(f'\t\t{name}')
+ emitter.echo(f"\t\t{name}")
if as_json:
print(json.dumps(data, indent=4))
elif pretty:
- entry = [ns, *(str(data.get(field, '?')) for field in headers)]
+ entry = [ns, *(str(data.get(field, "?")) for field in headers)]
human_data.append(entry)
if pretty:
- headers = ['namespace', *headers]
- print(tabulate(human_data, headers=headers, tablefmt='psql'))
-
-
-@cli.command('node-info')
-@click.option('--network', help="The network whose hosts you want to see.", type=click.STRING, required=True)
-@click.option('--namespace', help="The network whose hosts you want to see.", type=click.STRING, required=True)
-@click.option('--include-host', 'include_host', help="The node to print information for", type=click.STRING, required=True)
-@click.option('--json', 'as_json', help="Output information as json", default=False, is_flag=True)
+ headers = ["namespace", *headers]
+ print(tabulate(human_data, headers=headers, tablefmt="psql"))
+
+
+@cli.command("node-info")
+@click.option(
+ "--network",
+ help="The network whose hosts you want to see.",
+ type=click.STRING,
+ required=True,
+)
+@click.option(
+ "--namespace",
+ help="The network whose hosts you want to see.",
+ type=click.STRING,
+ required=True,
+)
+@click.option(
+ "--include-host",
+ "include_host",
+ help="The node to print information for",
+ type=click.STRING,
+ required=True,
+)
+@click.option(
+ "--json", "as_json", help="Output information as json", default=False, is_flag=True
+)
def node_info(network, namespace, include_host, as_json):
"""Prints configuration information about a specific node"""
- deployer = CloudDeployers.get_deployer('generic')(emitter, network=network, namespace=namespace, read_only=True)
+ deployer = CloudDeployers.get_deployer("generic")(
+ emitter, network=network, namespace=namespace, read_only=True
+ )
human_data = []
- headers = ['Property', 'Value']
+ headers = ["Property", "Value"]
host_data = deployer.get_host_by_name(host_name=include_host)
if not host_data:
- raise ValueError(f"Host information for '{include_host}' could not be found in network '{network}' and namespace '{namespace}'")
+ raise ValueError(
+ f"Host information for '{include_host}' could not be found in network '{network}' and namespace '{namespace}'"
+ )
if as_json:
print(json.dumps(host_data, indent=4))
@@ -178,32 +305,52 @@ def node_info(network, namespace, include_host, as_json):
human_data.append(row)
headers = [*headers]
- print(tabulate(human_data, headers=headers, tablefmt='psql', maxcolwidths=[None, 120]))
-
-
-@cli.command('destroy')
-@click.option('--cloudprovider', help="aws or digitalocean")
-@click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING, default=DEFAULT_NETWORK)
-@click.option('--include-host', 'include_hosts', help="Peform this operation on only the named hosts", multiple=True, type=click.STRING)
+ print(
+ tabulate(human_data, headers=headers, tablefmt="psql", maxcolwidths=[None, 120])
+ )
+
+
+@cli.command("destroy")
+@click.option("--cloudprovider", help="aws or digitalocean")
+@click.option(
+ "--namespace",
+ help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The Nucypher network name these hosts will run on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--include-host",
+ "include_hosts",
+ help="Peform this operation on only the named hosts",
+ multiple=True,
+ type=click.STRING,
+)
def destroy(cloudprovider, namespace, network, include_hosts):
"""Cleans up all previously created resources for the given network for the same cloud provider"""
if not cloudprovider:
- hosts = CloudDeployers.get_deployer('generic')(
- emitter, network=network, namespace=namespace).get_all_hosts()
+ hosts = CloudDeployers.get_deployer("generic")(
+ emitter, network=network, namespace=namespace
+ ).get_all_hosts()
# check if there are hosts in this namespace
- if len(set(host['provider'] for address, host in hosts)) == 1:
- cloudprovider = hosts[0][1]['provider']
+ if len(set(host["provider"] for address, host in hosts)) == 1:
+ cloudprovider = hosts[0][1]["provider"]
else:
emitter.echo("Found hosts from multiple cloudproviders.")
+ emitter.echo("We can only destroy hosts from one cloudprovider at a time.")
emitter.echo(
- "We can only destroy hosts from one cloudprovider at a time.")
- emitter.echo(
- "Please specify which provider's hosts you'd like to destroy using --cloudprovider (digitalocean or aws)")
+ "Please specify which provider's hosts you'd like to destroy using --cloudprovider (digitalocean or aws)"
+ )
return
deployer = CloudDeployers.get_deployer(cloudprovider)(
- emitter, network=network, namespace=namespace)
+ emitter, network=network, namespace=namespace
+ )
hostnames = [name for name, data in deployer.get_provider_hosts()]
if include_hosts:
@@ -211,32 +358,61 @@ def destroy(cloudprovider, namespace, network, include_hosts):
deployer.destroy_resources(hostnames)
-@cli.command('remove')
-@click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING, default=DEFAULT_NETWORK)
-@click.option('--include-host', 'include_hosts', help="Peform this operation on only the named hosts", multiple=True, type=click.STRING)
+@cli.command("remove")
+@click.option(
+ "--namespace",
+ help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The Nucypher network name these hosts will run on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--include-host",
+ "include_hosts",
+ help="Peform this operation on only the named hosts",
+ multiple=True,
+ type=click.STRING,
+)
def remove(namespace, network, include_hosts):
"""Removes managed resources for the given network/namespace"""
- deployer = CloudDeployers.get_deployer('generic')(
- emitter, network=network, namespace=namespace)
+ deployer = CloudDeployers.get_deployer("generic")(
+ emitter, network=network, namespace=namespace
+ )
hostnames = [name for name, data in deployer.get_all_hosts()]
if include_hosts:
hostnames = include_hosts
emitter.echo(
- f"\nAbout to remove information about the following: {', '.join(hostnames)}, including all local data about these nodes.")
+ f"\nAbout to remove information about the following: {', '.join(hostnames)}, including all local data about these nodes."
+ )
emitter.echo("\ntype 'y' to continue")
- if click.getchar(echo=False) == 'y':
+ if click.getchar(echo=False) == "y":
deployer.remove_resources(hostnames)
-@cli.command('config')
-@click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING, default=DEFAULT_NETWORK)
+@cli.command("config")
+@click.option(
+ "--namespace",
+ help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The Nucypher network name these hosts will run on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
def config(namespace, network):
"""prints the config path for a given network/namespace"""
- deployer = CloudDeployers.get_deployer('generic')(
- emitter, network=network, namespace=namespace)
- emitter.echo(deployer.config_path)
\ No newline at end of file
+ deployer = CloudDeployers.get_deployer("generic")(
+ emitter, network=network, namespace=namespace
+ )
+ emitter.echo(deployer.config_path)
diff --git a/nucypher_ops/cli/porter.py b/nucypher_ops/cli/porter.py
index 682a3d0..000a090 100644
--- a/nucypher_ops/cli/porter.py
+++ b/nucypher_ops/cli/porter.py
@@ -1,38 +1,78 @@
-from nucypher_ops.constants import DEFAULT_NAMESPACE, DEFAULT_NETWORK
-from nucypher_ops.ops.fleet_ops import CloudDeployers
import os
+
import click
+
+from nucypher_ops.constants import DEFAULT_NAMESPACE, DEFAULT_NETWORK
+from nucypher_ops.ops.fleet_ops import CloudDeployers
+
emitter = click
-@click.group('porter')
+@click.group("porter")
def cli():
"""deploy and update geth nodes"""
-@cli.command('deploy')
-@click.option('--image', help="The geth image to deploy", default='nucypher/porter:latest')
-@click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING, default=DEFAULT_NETWORK)
-@click.option('--include-host', 'include_hosts', help="specify hosts to update", multiple=True, type=click.STRING)
-@click.option('--env', '-e', 'envvars', help="additional environment variables (ENVVAR=VALUE)", multiple=True, type=click.STRING, default=[])
-@click.option('--cli', '-c', 'cliargs', help="additional cli arguments for geth", multiple=True, type=click.STRING, default=[])
+@cli.command("deploy")
+@click.option(
+ "--image", help="The geth image to deploy", default="nucypher/porter:latest"
+)
+@click.option(
+ "--namespace",
+ help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The Nucypher network name these hosts will run on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--include-host",
+ "include_hosts",
+ help="specify hosts to update",
+ multiple=True,
+ type=click.STRING,
+)
+@click.option(
+ "--env",
+ "-e",
+ "envvars",
+ help="additional environment variables (ENVVAR=VALUE)",
+ multiple=True,
+ type=click.STRING,
+ default=[],
+)
+@click.option(
+ "--cli",
+ "-c",
+ "cliargs",
+ help="additional cli arguments for geth",
+ multiple=True,
+ type=click.STRING,
+ default=[],
+)
def deploy(image, namespace, network, include_hosts, envvars, cliargs):
"""Deploys NuCypher on managed hosts."""
- deployer = CloudDeployers.get_deployer('porter')(emitter,
- docker_image=image,
- namespace=namespace,
- network=network,
- envvars=envvars,
- cliargs=cliargs,
- resource_name='porter'
- )
+ deployer = CloudDeployers.get_deployer("porter")(
+ emitter,
+ docker_image=image,
+ namespace=namespace,
+ network=network,
+ envvars=envvars,
+ cliargs=cliargs,
+ resource_name="porter",
+ )
- hostnames = deployer.config['instances'].keys()
+ hostnames = deployer.config["instances"].keys()
if include_hosts:
hostnames = include_hosts
- for name, hostdata in [(n, d) for n, d in deployer.config['instances'].items() if n in hostnames]:
+ for name, hostdata in [
+ (n, d) for n, d in deployer.config["instances"].items() if n in hostnames
+ ]:
emitter.echo(f'\t{name}: {hostdata["publicaddress"]}', color="yellow")
- os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
+ os.environ["ANSIBLE_HOST_KEY_CHECKING"] = "False"
deployer.deploy(hostnames)
diff --git a/nucypher_ops/cli/recover_utils.py b/nucypher_ops/cli/recover_utils.py
index 8715bf1..8fdcc58 100644
--- a/nucypher_ops/cli/recover_utils.py
+++ b/nucypher_ops/cli/recover_utils.py
@@ -1,7 +1,9 @@
import boto3
-def compare_and_remove_common_namespace_data(instance_capture: dict, include_hosts) -> dict:
+def compare_and_remove_common_namespace_data(
+ instance_capture: dict, include_hosts
+) -> dict:
# 1. remove namespace metadata; keys that start with '_'
namespace_metadata = {}
metadata_keys = []
@@ -22,14 +24,18 @@ def compare_and_remove_common_namespace_data(instance_capture: dict, include_hos
if instance_address != comparator_address:
for k in metadata_keys:
if comparator_address_data[k] != instance_data[k]:
- raise ValueError(f"Collected {k} data doesn't match "
- f"{comparator_address} ({comparator_address_data[k]}) vs"
- f"{instance_address} ({instance_data[k]}) ")
+ raise ValueError(
+ f"Collected {k} data doesn't match "
+ f"{comparator_address} ({comparator_address_data[k]}) vs"
+ f"{instance_address} ({instance_data[k]}) "
+ )
return comparator_address_data
-def add_deploy_attributes(instance_capture, include_hosts, ssh_key_path, login_name, ssh_port):
+def add_deploy_attributes(
+ instance_capture, include_hosts, ssh_key_path, login_name, ssh_port
+):
for host in include_hosts:
# add deploy attrs to instance data
deploy_attrs = instance_capture.get("provider_deploy_attrs", list())
@@ -38,34 +44,32 @@ def add_deploy_attributes(instance_capture, include_hosts, ssh_key_path, login_n
entry = (
host,
[
- {'key': 'ansible_ssh_private_key_file', 'value': ssh_key_path},
- {'key': 'default_user', 'value': login_name},
- {'key': 'ansible_port', 'value': ssh_port}
- ]
+ {"key": "ansible_ssh_private_key_file", "value": ssh_key_path},
+ {"key": "default_user", "value": login_name},
+ {"key": "ansible_port", "value": ssh_port},
+ ],
)
deploy_attrs.append(entry)
def collect_aws_pre_config_data(aws_profile, region, ip_address, ssh_key_path) -> dict:
- pre_config_metadata = {
- 'aws-profile': aws_profile, 'aws-region': region
- }
+ pre_config_metadata = {"aws-profile": aws_profile, "aws-region": region}
instance_info = get_aws_instance_info(aws_profile, region, ip_address)
- pre_config_metadata['keypair_path'] = ssh_key_path
- pre_config_metadata['keypair'] = instance_info['KeyName']
+ pre_config_metadata["keypair_path"] = ssh_key_path
+ pre_config_metadata["keypair"] = instance_info["KeyName"]
- vpc_id = instance_info['VpcId']
- pre_config_metadata['Vpc'] = vpc_id
- subnet_id = instance_info['SubnetId']
- pre_config_metadata['Subnet'] = subnet_id
- pre_config_metadata['SecurityGroup'] = instance_info['SecurityGroups'][0]['GroupId']
+ vpc_id = instance_info["VpcId"]
+ pre_config_metadata["Vpc"] = vpc_id
+ subnet_id = instance_info["SubnetId"]
+ pre_config_metadata["Subnet"] = subnet_id
+ pre_config_metadata["SecurityGroup"] = instance_info["SecurityGroups"][0]["GroupId"]
internet_gateway_info = get_aws_internet_gateway_info(aws_profile, region, vpc_id)
- pre_config_metadata['InternetGateway'] = internet_gateway_info['InternetGatewayId']
+ pre_config_metadata["InternetGateway"] = internet_gateway_info["InternetGatewayId"]
route_table_info = get_aws_route_table_info(aws_profile, region, subnet_id)
- pre_config_metadata['RouteTable'] = route_table_info['RouteTableId']
+ pre_config_metadata["RouteTable"] = route_table_info["RouteTableId"]
return pre_config_metadata
@@ -73,17 +77,12 @@ def collect_aws_pre_config_data(aws_profile, region, ip_address, ssh_key_path) -
def get_aws_instance_info(aws_profile, aws_region, ip_address) -> dict:
# aws ec2 describe-instances --filters Name=ip-address,Values=
aws_session = boto3.Session(profile_name=aws_profile, region_name=aws_region)
- ec2Client = aws_session.client('ec2')
+ ec2Client = aws_session.client("ec2")
result = ec2Client.describe_instances(
- Filters=[
- {
- 'Name': 'ip-address',
- 'Values': [ip_address]
- }
- ]
+ Filters=[{"Name": "ip-address", "Values": [ip_address]}]
)
- instance_info = result['Reservations'][0]['Instances'][0]
+ instance_info = result["Reservations"][0]["Instances"][0]
return instance_info
@@ -91,23 +90,23 @@ def get_aws_instance_info(aws_profile, aws_region, ip_address) -> dict:
def get_aws_internet_gateway_info(aws_profile, aws_region, vpc_id) -> dict:
# aws ec2 describe-internet-gateways --filters Name=attachment.vpc-id,Values=
aws_session = boto3.Session(profile_name=aws_profile, region_name=aws_region)
- ec2Client = aws_session.client('ec2')
+ ec2Client = aws_session.client("ec2")
internet_gateway_info = ec2Client.describe_internet_gateways(
Filters=[
- {'Values': [vpc_id], 'Name': 'attachment.vpc-id'},
+ {"Values": [vpc_id], "Name": "attachment.vpc-id"},
]
- )['InternetGateways'][0]
+ )["InternetGateways"][0]
return internet_gateway_info
def get_aws_route_table_info(aws_profile, aws_region, subnet_id) -> dict:
# aws ec2 describe-route-tables --filters Name=association.subnet-id,Values=
aws_session = boto3.Session(profile_name=aws_profile, region_name=aws_region)
- ec2Client = aws_session.client('ec2')
+ ec2Client = aws_session.client("ec2")
route_table_info = ec2Client.describe_route_tables(
Filters=[
- {'Values': [subnet_id], 'Name': 'association.subnet-id'},
+ {"Values": [subnet_id], "Name": "association.subnet-id"},
]
- )['RouteTables'][0]
+ )["RouteTables"][0]
return route_table_info
diff --git a/nucypher_ops/cli/tbtcv2.py b/nucypher_ops/cli/tbtcv2.py
index 9325de0..285f856 100644
--- a/nucypher_ops/cli/tbtcv2.py
+++ b/nucypher_ops/cli/tbtcv2.py
@@ -1,150 +1,318 @@
+import os
from pathlib import Path
+import click
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.inventory.manager import InventoryManager
from ansible.parsing.dataloader import DataLoader
from ansible.vars.manager import VariableManager
-from nucypher_ops.cli.recover_utils import compare_and_remove_common_namespace_data, \
- add_deploy_attributes, collect_aws_pre_config_data, get_aws_instance_info
+from nucypher_ops.cli.recover_utils import (
+ add_deploy_attributes,
+ collect_aws_pre_config_data,
+ compare_and_remove_common_namespace_data,
+ get_aws_instance_info,
+)
from nucypher_ops.constants import DEFAULT_NAMESPACE, DEFAULT_NETWORK, PLAYBOOKS
from nucypher_ops.ops.ansible_utils import AnsiblePlayBookResultsCollector
from nucypher_ops.ops.fleet_ops import CloudDeployers
-import os
-import click
+
emitter = click
-@click.group('tbtcv2')
+@click.group("tbtcv2")
def cli():
"""deploy tbtcv2/random beacon nodes"""
-@cli.command('stage')
-@click.option('--image', help="The docker image to deploy", default='keepnetwork/keep-client:latest')
-@click.option('--namespace', help="Namespace for these nodes. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The network name these hosts will run on.", type=click.STRING, default=DEFAULT_NETWORK)
-@click.option('--include-host', 'include_hosts', help="specify hosts to target", multiple=True, type=click.STRING)
-@click.option('--env', '-e', 'envvars', help="Environment variables used during execution (ENVVAR=VALUE)", multiple=True, type=click.STRING, default=[])
-@click.option('--cli', '-c', 'cliargs', help="additional cli launching arguments", multiple=True, type=click.STRING, default=[])
+@cli.command("stage")
+@click.option(
+ "--image",
+ help="The docker image to deploy",
+ default="keepnetwork/keep-client:latest",
+)
+@click.option(
+ "--namespace",
+ help="Namespace for these nodes. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The network name these hosts will run on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--include-host",
+ "include_hosts",
+ help="specify hosts to target",
+ multiple=True,
+ type=click.STRING,
+)
+@click.option(
+ "--env",
+ "-e",
+ "envvars",
+ help="Environment variables used during execution (ENVVAR=VALUE)",
+ multiple=True,
+ type=click.STRING,
+ default=[],
+)
+@click.option(
+ "--cli",
+ "-c",
+ "cliargs",
+ help="additional cli launching arguments",
+ multiple=True,
+ type=click.STRING,
+ default=[],
+)
def stage(image, namespace, network, include_hosts, envvars, cliargs):
"""Set up and configure tbtcv2 node but don't run it"""
- deployer = CloudDeployers.get_deployer('tbtcv2')(emitter,
- docker_image=image,
- namespace=namespace,
- network=network,
- envvars=envvars,
- cliargs=cliargs,
- resource_name='tbtcv2')
-
- hostnames = deployer.config['instances'].keys()
+ deployer = CloudDeployers.get_deployer("tbtcv2")(
+ emitter,
+ docker_image=image,
+ namespace=namespace,
+ network=network,
+ envvars=envvars,
+ cliargs=cliargs,
+ resource_name="tbtcv2",
+ )
+
+ hostnames = deployer.config["instances"].keys()
if include_hosts:
hostnames = include_hosts
- for name, hostdata in [(n, d) for n, d in deployer.config['instances'].items() if n in hostnames]:
+ for name, hostdata in [
+ (n, d) for n, d in deployer.config["instances"].items() if n in hostnames
+ ]:
emitter.echo(f'\t{name}: {hostdata["publicaddress"]}', color="yellow")
- os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
+ os.environ["ANSIBLE_HOST_KEY_CHECKING"] = "False"
deployer.stage_nodes(hostnames)
-@cli.command('run')
-@click.option('--image', help="The docker image to deploy", default='keepnetwork/keep-client:latest')
-@click.option('--namespace', help="Namespace for these nodes. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The network name these hosts will run on.", type=click.STRING, default=DEFAULT_NETWORK)
-@click.option('--include-host', 'include_hosts', help="specify hosts to target", multiple=True, type=click.STRING)
-@click.option('--env', '-e', 'envvars', help="Environment variables used during execution (ENVVAR=VALUE)", multiple=True, type=click.STRING, default=[])
-@click.option('--cli', '-c', 'cliargs', help="additional cli launching arguments", multiple=True, type=click.STRING, default=[])
+@cli.command("run")
+@click.option(
+ "--image",
+ help="The docker image to deploy",
+ default="keepnetwork/keep-client:latest",
+)
+@click.option(
+ "--namespace",
+ help="Namespace for these nodes. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The network name these hosts will run on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--include-host",
+ "include_hosts",
+ help="specify hosts to target",
+ multiple=True,
+ type=click.STRING,
+)
+@click.option(
+ "--env",
+ "-e",
+ "envvars",
+ help="Environment variables used during execution (ENVVAR=VALUE)",
+ multiple=True,
+ type=click.STRING,
+ default=[],
+)
+@click.option(
+ "--cli",
+ "-c",
+ "cliargs",
+ help="additional cli launching arguments",
+ multiple=True,
+ type=click.STRING,
+ default=[],
+)
def run(image, namespace, network, include_hosts, envvars, cliargs):
"""Start tbtcv2 node."""
- deployer = CloudDeployers.get_deployer('tbtcv2')(emitter,
- docker_image=image,
- namespace=namespace,
- network=network,
- envvars=envvars,
- cliargs=cliargs,
- resource_name='tbtcv2')
-
- hostnames = deployer.config['instances'].keys()
+ deployer = CloudDeployers.get_deployer("tbtcv2")(
+ emitter,
+ docker_image=image,
+ namespace=namespace,
+ network=network,
+ envvars=envvars,
+ cliargs=cliargs,
+ resource_name="tbtcv2",
+ )
+
+ hostnames = deployer.config["instances"].keys()
if include_hosts:
hostnames = include_hosts
- for name, hostdata in [(n, d) for n, d in deployer.config['instances'].items() if n in hostnames]:
+ for name, hostdata in [
+ (n, d) for n, d in deployer.config["instances"].items() if n in hostnames
+ ]:
emitter.echo(f'\t{name}: {hostdata["publicaddress"]}', color="yellow")
- os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
+ os.environ["ANSIBLE_HOST_KEY_CHECKING"] = "False"
deployer.run_nodes(hostnames)
-@cli.command('operator-address')
-@click.option('--image', help="The docker image to deploy", default='keepnetwork/keep-client:latest')
-@click.option('--namespace', help="Namespace for these nodes. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The network name these hosts will run on.", type=click.STRING, default=DEFAULT_NETWORK)
-@click.option('--include-host', 'include_hosts', help="specify hosts to target", multiple=True, type=click.STRING)
-@click.option('--env', '-e', 'envvars', help="Environment variables used during execution (ENVVAR=VALUE)", multiple=True, type=click.STRING, default=[])
-@click.option('--cli', '-c', 'cliargs', help="additional cli launching arguments", multiple=True, type=click.STRING, default=[])
+@cli.command("operator-address")
+@click.option(
+ "--image",
+ help="The docker image to deploy",
+ default="keepnetwork/keep-client:latest",
+)
+@click.option(
+ "--namespace",
+ help="Namespace for these nodes. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The network name these hosts will run on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--include-host",
+ "include_hosts",
+ help="specify hosts to target",
+ multiple=True,
+ type=click.STRING,
+)
+@click.option(
+ "--env",
+ "-e",
+ "envvars",
+ help="Environment variables used during execution (ENVVAR=VALUE)",
+ multiple=True,
+ type=click.STRING,
+ default=[],
+)
+@click.option(
+ "--cli",
+ "-c",
+ "cliargs",
+ help="additional cli launching arguments",
+ multiple=True,
+ type=click.STRING,
+ default=[],
+)
def operator_address(image, namespace, network, include_hosts, envvars, cliargs):
"""Determine operator address for specified hosts"""
- deployer = CloudDeployers.get_deployer('tbtcv2')(emitter,
- docker_image=image,
- namespace=namespace,
- network=network,
- envvars=envvars,
- cliargs=cliargs,
- resource_name='tbtcv2')
-
- hostnames = deployer.config['instances'].keys()
+ deployer = CloudDeployers.get_deployer("tbtcv2")(
+ emitter,
+ docker_image=image,
+ namespace=namespace,
+ network=network,
+ envvars=envvars,
+ cliargs=cliargs,
+ resource_name="tbtcv2",
+ )
+
+ hostnames = deployer.config["instances"].keys()
if include_hosts:
hostnames = include_hosts
- for name, hostdata in [(n, d) for n, d in deployer.config['instances'].items() if n in hostnames]:
+ for name, hostdata in [
+ (n, d) for n, d in deployer.config["instances"].items() if n in hostnames
+ ]:
emitter.echo(f'\t{name}: {hostdata["publicaddress"]}', color="yellow")
- os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
+ os.environ["ANSIBLE_HOST_KEY_CHECKING"] = "False"
deployer.get_operator_address(hostnames)
-@cli.command('stop')
-@click.option('--namespace', help="Namespace for these nodes. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The network name these hosts will run on.", type=click.STRING, default=DEFAULT_NETWORK)
-@click.option('--include-host', 'include_hosts', help="specify hosts to target", multiple=True, type=click.STRING)
+@cli.command("stop")
+@click.option(
+ "--namespace",
+ help="Namespace for these nodes. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The network name these hosts will run on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--include-host",
+ "include_hosts",
+ help="specify hosts to target",
+ multiple=True,
+ type=click.STRING,
+)
def stop(namespace, network, include_hosts):
"""Stop tbtcv2 node(s)"""
- deployer = CloudDeployers.get_deployer('tbtcv2')(emitter,
- namespace=namespace,
- network=network,
- resource_name='tbtcv2')
- hostnames = deployer.config['instances'].keys()
+ deployer = CloudDeployers.get_deployer("tbtcv2")(
+ emitter, namespace=namespace, network=network, resource_name="tbtcv2"
+ )
+ hostnames = deployer.config["instances"].keys()
if include_hosts:
hostnames = include_hosts
- for name, hostdata in [(n, d) for n, d in deployer.config['instances'].items() if n in hostnames]:
+ for name, hostdata in [
+ (n, d) for n, d in deployer.config["instances"].items() if n in hostnames
+ ]:
emitter.echo(f'\t{name}: {hostdata["publicaddress"]}', color="yellow")
- os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
+ os.environ["ANSIBLE_HOST_KEY_CHECKING"] = "False"
deployer.stop_nodes(hostnames)
-@cli.command('fund')
-@click.option('--amount', help="The amount to fund each node. Default is .003", type=click.FLOAT, default=.003)
-@click.option('--namespace',
- help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
- type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING,
- default=DEFAULT_NETWORK)
-@click.option('--include-host', 'include_hosts', help="Perform this operation on only the named hosts", multiple=True,
- type=click.STRING)
+@cli.command("fund")
+@click.option(
+ "--amount",
+ help="The amount to fund each node. Default is .003",
+ type=click.FLOAT,
+ default=0.003,
+)
+@click.option(
+ "--namespace",
+ help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The Nucypher network name these hosts will run on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--include-host",
+ "include_hosts",
+ help="Perform this operation on only the named hosts",
+ multiple=True,
+ type=click.STRING,
+)
def fund(amount, namespace, network, include_hosts):
"""
Fund remote nodes automatically using a locally managed burner wallet
"""
- deployer = CloudDeployers.get_deployer('tbtcv2')(emitter, namespace=namespace, network=network)
+ deployer = CloudDeployers.get_deployer("tbtcv2")(
+ emitter, namespace=namespace, network=network
+ )
if deployer.has_wallet:
- if password := os.getenv('NUCYPHER_OPS_LOCAL_ETH_PASSWORD'):
+ if password := os.getenv("NUCYPHER_OPS_LOCAL_ETH_PASSWORD"):
emitter.echo("found local eth password in environment variable")
else:
- password = click.prompt('Please enter the wallet password you saved for this account', hide_input=True)
+ password = click.prompt(
+ "Please enter the wallet password you saved for this account",
+ hide_input=True,
+ )
else:
emitter.echo("Creating a new wallet to fund your nodes.")
- if password := os.getenv('NUCYPHER_OPS_LOCAL_ETH_PASSWORD'):
+ if password := os.getenv("NUCYPHER_OPS_LOCAL_ETH_PASSWORD"):
emitter.echo("found local eth password in environment variable")
else:
- password = click.prompt('please enter a password for this new eth wallet', hide_input=True)
- passwordagain = click.prompt('please enter the same password again', hide_input=True)
+ password = click.prompt(
+ "please enter a password for this new eth wallet", hide_input=True
+ )
+ passwordagain = click.prompt(
+ "please enter the same password again", hide_input=True
+ )
if not password == passwordagain:
raise AttributeError("passwords dont' match please try again.")
@@ -153,91 +321,167 @@ def fund(amount, namespace, network, include_hosts):
balance = deployer.get_wallet_balance(wallet.address, eth=True)
emitter.echo(f"balance: {deployer.get_wallet_balance(wallet.address)}")
- hostnames = deployer.config['instances'].keys()
+ hostnames = deployer.config["instances"].keys()
if include_hosts:
hostnames = include_hosts
emitter.echo(f"funding {len(hostnames)} nodes with {amount} ETH each.")
if balance < amount * len(hostnames):
emitter.echo(
- f"balance on local wallet ({balance} ETH) is not enough to fund {len(hostnames)} with {amount} ETH. Add more funds to local wallet ({wallet.address})")
+ f"balance on local wallet ({balance} ETH) is not enough to fund {len(hostnames)} with {amount} ETH. Add more funds to local wallet ({wallet.address})"
+ )
return
deployer.fund_nodes(wallet, hostnames, amount)
-@cli.command('defund')
-@click.option('--amount', help="The amount to defund. Default is the entire balance of the node's wallet.",
- type=click.FLOAT, default=None)
-@click.option('--to-address', help="To which ETH address are you sending the proceeds?", required=True)
-@click.option('--namespace',
- help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
- type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING,
- default=DEFAULT_NETWORK)
-@click.option('--include-host', 'include_hosts', help="Peform this operation on only the named hosts", multiple=True,
- type=click.STRING)
+@cli.command("defund")
+@click.option(
+ "--amount",
+ help="The amount to defund. Default is the entire balance of the node's wallet.",
+ type=click.FLOAT,
+ default=None,
+)
+@click.option(
+ "--to-address",
+ help="To which ETH address are you sending the proceeds?",
+ required=True,
+)
+@click.option(
+ "--namespace",
+ help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The Nucypher network name these hosts will run on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--include-host",
+ "include_hosts",
+ help="Peform this operation on only the named hosts",
+ multiple=True,
+ type=click.STRING,
+)
def defund(amount, to_address, namespace, network, include_hosts):
"""Transfer remaining ETH balance from operator address to another address"""
- deployer = CloudDeployers.get_deployer('generic')(emitter, namespace=namespace, network=network)
+ deployer = CloudDeployers.get_deployer("generic")(
+ emitter, namespace=namespace, network=network
+ )
- hostnames = deployer.config['instances'].keys()
+ hostnames = deployer.config["instances"].keys()
if include_hosts:
hostnames = include_hosts
deployer.defund_nodes(hostnames, to=to_address, amount=amount)
-@cli.command('recover-node-config')
-@click.option('--include-host', 'include_hosts', help="specify hosts to recover", multiple=True, required=True, type=click.STRING)
-@click.option('--provider', help="The cloud provider host(s) are running on", multiple=False, required=True, type=click.Choice(['digitalocean', 'aws']))
-@click.option('--aws-profile', help="The AWS profile name to use when interacting with remote node", required=False)
-@click.option('--namespace', help="Namespace for these operations", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="Network that the node is running on", type=click.STRING, default=DEFAULT_NETWORK)
-@click.option('--login-name', help="The name username of a user with root privileges we can ssh as on the host.", default="root")
-@click.option('--key-path', 'ssh_key_path', help="The path to a keypair we will need to ssh into this host (default: ~/.ssh/id_rsa)", default="~/.ssh/id_rsa")
-@click.option('--ssh-port', help="The port this host's ssh daemon is listening on (default: 22)", default=22)
-def recover_node_config(include_hosts, provider, aws_profile, namespace, network, login_name, ssh_key_path, ssh_port):
+@cli.command("recover-node-config")
+@click.option(
+ "--include-host",
+ "include_hosts",
+ help="specify hosts to recover",
+ multiple=True,
+ required=True,
+ type=click.STRING,
+)
+@click.option(
+ "--provider",
+ help="The cloud provider host(s) are running on",
+ multiple=False,
+ required=True,
+ type=click.Choice(["digitalocean", "aws"]),
+)
+@click.option(
+ "--aws-profile",
+ help="The AWS profile name to use when interacting with remote node",
+ required=False,
+)
+@click.option(
+ "--namespace",
+ help="Namespace for these operations",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="Network that the node is running on",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--login-name",
+ help="The name username of a user with root privileges we can ssh as on the host.",
+ default="root",
+)
+@click.option(
+ "--key-path",
+ "ssh_key_path",
+ help="The path to a keypair we will need to ssh into this host (default: ~/.ssh/id_rsa)",
+ default="~/.ssh/id_rsa",
+)
+@click.option(
+ "--ssh-port",
+ help="The port this host's ssh daemon is listening on (default: 22)",
+ default=22,
+)
+def recover_node_config(
+ include_hosts,
+ provider,
+ aws_profile,
+ namespace,
+ network,
+ login_name,
+ ssh_key_path,
+ ssh_port,
+):
"""Regenerate previously lost/deleted node config(s)"""
- if (provider == 'aws') ^ bool(aws_profile):
- raise click.BadOptionUsage('--aws-profile', f"Expected both '--aws-profile ' and '--provider aws' to be specified; got ({aws_profile}, {provider})")
- if provider == 'aws' and login_name != 'ubuntu':
- result = emitter.confirm(f"When using AWS the expectation is that the login name would be 'ubuntu' and not '{login_name}'. Are you sure you want to continue using '{login_name}'?")
+ if (provider == "aws") ^ bool(aws_profile):
+ raise click.BadOptionUsage(
+ "--aws-profile",
+ f"Expected both '--aws-profile ' and '--provider aws' to be specified; got ({aws_profile}, {provider})",
+ )
+ if provider == "aws" and login_name != "ubuntu":
+ result = emitter.confirm(
+ f"When using AWS the expectation is that the login name would be 'ubuntu' and not '{login_name}'. Are you sure you want to continue using '{login_name}'?"
+ )
if not result:
- raise click.BadOptionUsage('--login-name', "Incorrect use of '--login-name'")
+ raise click.BadOptionUsage(
+ "--login-name", "Incorrect use of '--login-name'"
+ )
- playbook = Path(PLAYBOOKS).joinpath('recover_tbtcv2_ops_data.yml')
+ playbook = Path(PLAYBOOKS).joinpath("recover_tbtcv2_ops_data.yml")
instance_capture = {
- 'InstanceId': [],
- 'publicaddress': [],
- 'host_nickname': [],
- 'eth_provider': [],
- 'docker_image': [],
- 'operator address': [],
- 'nickname': [],
- 'rest url': [],
-
+ "InstanceId": [],
+ "publicaddress": [],
+ "host_nickname": [],
+ "eth_provider": [],
+ "docker_image": [],
+ "operator address": [],
+ "nickname": [],
+ "rest url": [],
# non-instance dictionary data
- '_ssh-fingerprint': [],
- '_instance-region': [],
- '_operator-password': [],
+ "_ssh-fingerprint": [],
+ "_instance-region": [],
+ "_operator-password": [],
}
- inventory_host_list = '{},'.format(",".join(include_hosts))
+ inventory_host_list = "{},".format(",".join(include_hosts))
loader = DataLoader()
- inventory = InventoryManager(
- loader=loader, sources=inventory_host_list)
+ inventory = InventoryManager(loader=loader, sources=inventory_host_list)
hosts = inventory.get_hosts()
for host in hosts:
- host.set_variable('ansible_ssh_private_key_file', ssh_key_path)
- host.set_variable('default_user', login_name)
- host.set_variable('ansible_port', ssh_port)
- host.set_variable('ansible_connection', 'ssh')
- host.set_variable('cloud_provider', provider) # aws / digital ocean
+ host.set_variable("ansible_ssh_private_key_file", ssh_key_path)
+ host.set_variable("default_user", login_name)
+ host.set_variable("ansible_port", ssh_port)
+ host.set_variable("ansible_connection", "ssh")
+ host.set_variable("cloud_provider", provider) # aws / digital ocean
callback = AnsiblePlayBookResultsCollector(
- sock=emitter,
- return_results=instance_capture
+ sock=emitter, return_results=instance_capture
)
variable_manager = VariableManager(loader=loader, inventory=inventory)
@@ -255,55 +499,61 @@ def recover_node_config(include_hosts, provider, aws_profile, namespace, network
#
# Process data capture
# 1. remove namespace metadata; keys that start with '_'
- comparator_address_data = compare_and_remove_common_namespace_data(instance_capture, include_hosts)
+ comparator_address_data = compare_and_remove_common_namespace_data(
+ instance_capture, include_hosts
+ )
# 2. add deploy attributes
- add_deploy_attributes(instance_capture, include_hosts, ssh_key_path, login_name, ssh_port)
+ add_deploy_attributes(
+ instance_capture, include_hosts, ssh_key_path, login_name, ssh_port
+ )
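+ # rebuild local pre-config metadata from the recovered values (placeholders where unrecoverable)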
pre_config_metadata = {
- "namespace": f'{namespace}-{network}',
+ "namespace": f"{namespace}-{network}",
"keystorepassword": "N/A (recovery mode)",
- "ethpassword": comparator_address_data['_operator-password'],
+ "ethpassword": comparator_address_data["_operator-password"],
"keystoremnemonic": "N/A (recovery mode)",
- "sshkey": comparator_address_data['_ssh-fingerprint'],
+ "sshkey": comparator_address_data["_ssh-fingerprint"],
}
# 3. Provider information
- region = comparator_address_data['_instance-region']
- if provider == 'digitalocean':
- pre_config_metadata['digital-ocean-region'] = region
+ region = comparator_address_data["_instance-region"]
+ if provider == "digitalocean":
+ pre_config_metadata["digital-ocean-region"] = region
# DO access token
digital_access_token = emitter.prompt(
- f"Please enter your Digital Ocean Access Token which can be created here: https://cloud.digitalocean.com/account/api/tokens. It looks like this: b34abcDEF17ABCDEFAbcDEf09fd72a28425ABCDEF8b198e9623ABCDEFc11591")
+ "Please enter your Digital Ocean Access Token which can be created here: https://cloud.digitalocean.com/account/api/tokens. "
+ "It looks like this: b34abcDEF17ABCDEFAbcDEf09fd72a28425ABCDEF8b198e9623ABCDEFc11591"
+ )
if not digital_access_token:
- raise AttributeError(
- "Could not continue without Access Token")
- pre_config_metadata['digital-ocean-access-token'] = digital_access_token
+ raise AttributeError("Could not continue without Access Token")
+ pre_config_metadata["digital-ocean-access-token"] = digital_access_token
else:
- aws_config_data = collect_aws_pre_config_data(aws_profile, region, include_hosts[0],
- ssh_key_path)
+ aws_config_data = collect_aws_pre_config_data(
+ aws_profile, region, include_hosts[0], ssh_key_path
+ )
pre_config_metadata.update(aws_config_data)
# set up pre-config instances
node_names = []
instances_dict = {}
- if provider == 'aws':
+ if provider == "aws":
# must update 'nickname' and 'host_nickname' entries - aws nicknames are local
- instance_capture['nickname'].clear()
+ instance_capture["nickname"].clear()
- old_host_nicknames = instance_capture.pop('host_nickname')
- instance_capture['host_nickname'] = []
+ old_host_nicknames = instance_capture.pop("host_nickname")
+ instance_capture["host_nickname"] = []
for ip_address, host_nickname in old_host_nicknames:
- if provider == 'aws':
+ if provider == "aws":
# update nickname for AWS
instance_info = get_aws_instance_info(aws_profile, region, ip_address)
- host_nickname = instance_info['Tags'][0]['Value']
- instance_capture['nickname'].append((ip_address, host_nickname))
+ host_nickname = instance_info["Tags"][0]["Value"]
+ instance_capture["nickname"].append((ip_address, host_nickname))
# either update for AWS or leave the same for DigitalOcean
- instance_capture['host_nickname'].append((ip_address, host_nickname))
+ instance_capture["host_nickname"].append((ip_address, host_nickname))
instances_dict[host_nickname] = {
"publicaddress": ip_address,
"installed": ["tbtcv2"],
@@ -320,7 +570,7 @@ def recover_node_config(include_hosts, provider, aws_profile, namespace, network
namespace=namespace,
network=network,
pre_config=pre_config_metadata,
- resource_name='tbtcv2'
+ resource_name="tbtcv2",
)
# regenerate instance configuration file
diff --git a/nucypher_ops/cli/ursula.py b/nucypher_ops/cli/ursula.py
index 106d796..a2f4e6d 100644
--- a/nucypher_ops/cli/ursula.py
+++ b/nucypher_ops/cli/ursula.py
@@ -8,10 +8,10 @@
from ansible.vars.manager import VariableManager
from nucypher_ops.cli.recover_utils import (
- compare_and_remove_common_namespace_data,
add_deploy_attributes,
+ collect_aws_pre_config_data,
+ compare_and_remove_common_namespace_data,
get_aws_instance_info,
- collect_aws_pre_config_data
)
from nucypher_ops.constants import DEFAULT_NAMESPACE, DEFAULT_NETWORK, PLAYBOOKS
from nucypher_ops.ops.ansible_utils import AnsiblePlayBookResultsCollector
@@ -20,123 +20,305 @@
emitter = click
-@click.group('ursula')
+@click.group("ursula")
def cli():
"""deploy and update ursula nodes"""
-@cli.command('deploy')
-@click.option('--polygon-endpoint', help="The polygon L2 blockchain provider for the remote node.", default=None)
-@click.option('--eth-endpoint', help="The ethereum blockchain provider for the remote node.", default=None)
-@click.option('--nucypher-image', help="The docker image containing the nucypher code to run on the remote nodes.", default='nucypher/nucypher:latest')
-@click.option('--seed-network', help="Do you want the 1st node to be --lonely and act as a seed node for this network", default=None, is_flag=True)
-@click.option('--init', help="Clear your nucypher config and start a fresh node with new keys", default=False, is_flag=True)
-@click.option('--migrate', help="Migrate nucypher nodes between compatibility breaking versions", default=False, is_flag=True)
-@click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING, default=DEFAULT_NETWORK)
-@click.option('--include-host', 'include_hosts', help="specify hosts to update", multiple=True, type=click.STRING)
-@click.option('--env', '-e', 'envvars', help="environment variables (ENVVAR=VALUE)", multiple=True, type=click.STRING, default=[])
-@click.option('--cli', '-c', 'cliargs', help="cli arguments for 'nucypher run': eg.'--max-gas-price 50'/'--c max-gas-price=50'", multiple=True, type=click.STRING, default=[])
-def deploy(polygon_endpoint, eth_endpoint, nucypher_image, seed_network, init, migrate,
- namespace, network, include_hosts, envvars, cliargs):
+@cli.command("deploy")
+@click.option(
+ "--polygon-endpoint",
+ help="The polygon L2 blockchain provider for the remote node.",
+ default=None,
+)
+@click.option(
+ "--eth-endpoint",
+ help="The ethereum blockchain provider for the remote node.",
+ default=None,
+)
+@click.option(
+ "--nucypher-image",
+ help="The docker image containing the nucypher code to run on the remote nodes.",
+ default="nucypher/nucypher:latest",
+)
+@click.option(
+ "--seed-network",
+ help="Do you want the 1st node to be --lonely and act as a seed node for this network",
+ default=None,
+ is_flag=True,
+)
+@click.option(
+ "--init",
+ help="Clear your nucypher config and start a fresh node with new keys",
+ default=False,
+ is_flag=True,
+)
+@click.option(
+ "--migrate",
+ help="Migrate nucypher nodes between compatibility breaking versions",
+ default=False,
+ is_flag=True,
+)
+@click.option(
+ "--namespace",
+ help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The Nucypher network name these hosts will run on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--include-host",
+ "include_hosts",
+ help="specify hosts to update",
+ multiple=True,
+ type=click.STRING,
+)
+@click.option(
+ "--env",
+ "-e",
+ "envvars",
+ help="environment variables (ENVVAR=VALUE)",
+ multiple=True,
+ type=click.STRING,
+ default=[],
+)
+@click.option(
+ "--cli",
+ "-c",
+ "cliargs",
+ help="cli arguments for 'nucypher run': eg.'--max-gas-price 50'/'--c max-gas-price=50'",
+ multiple=True,
+ type=click.STRING,
+ default=[],
+)
+def deploy(
+ polygon_endpoint,
+ eth_endpoint,
+ nucypher_image,
+ seed_network,
+ init,
+ migrate,
+ namespace,
+ network,
+ include_hosts,
+ envvars,
+ cliargs,
+):
"""Deploys NuCypher on managed hosts."""
- deployer = CloudDeployers.get_deployer('generic')(emitter,
- seed_network=seed_network,
- namespace=namespace,
- network=network,
- envvars=envvars,
- cliargs=cliargs,
- resource_name='nucypher',
- eth_endpoint=eth_endpoint,
- polygon_endpoint=polygon_endpoint,
- docker_image=nucypher_image,
- )
-
- hostnames = deployer.config['instances'].keys()
+ deployer = CloudDeployers.get_deployer("generic")(
+ emitter,
+ seed_network=seed_network,
+ namespace=namespace,
+ network=network,
+ envvars=envvars,
+ cliargs=cliargs,
+ resource_name="nucypher",
+ eth_endpoint=eth_endpoint,
+ polygon_endpoint=polygon_endpoint,
+ docker_image=nucypher_image,
+ )
+
+ hostnames = deployer.config["instances"].keys()
if include_hosts:
hostnames = include_hosts
- for name, hostdata in [(n, d) for n, d in deployer.config['instances'].items() if n in hostnames]:
+ for name, hostdata in [
+ (n, d) for n, d in deployer.config["instances"].items() if n in hostnames
+ ]:
emitter.echo(f'\t{name}: {hostdata["publicaddress"]}', color="yellow")
- os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
+ os.environ["ANSIBLE_HOST_KEY_CHECKING"] = "False"
deployer.deploy_nucypher_on_existing_nodes(
- hostnames, migrate_nucypher=migrate, init=init)
-
-
-@cli.command('update')
-@click.option('--nucypher-image', help="The docker image containing the nucypher code to run on the remote nodes.", default=None)
-@click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING, default=DEFAULT_NETWORK)
-@click.option('--include-host', 'include_hosts', help="specify hosts to update", multiple=True, type=click.STRING)
-@click.option('--env', '-e', 'envvars', help="environment variables (ENVVAR=VALUE)", multiple=True, type=click.STRING, default=[])
-@click.option('--cli', '-c', 'cliargs', help="cli arguments for 'nucypher run': eg.'--max-gas-price 50'/'--c max-gas-price=50'", multiple=True, type=click.STRING, default=[])
-@click.option('--eth-endpoint', help="The ethereum blockchain provider for the remote node.", default=None)
-@click.option('--polygon-endpoint', help="The polygon L2 blockchain provider for the remote node.", default=None)
-def update(nucypher_image, namespace, network, include_hosts, envvars, cliargs, eth_endpoint, polygon_endpoint):
+ hostnames, migrate_nucypher=migrate, init=init
+ )
+
+
+@cli.command("update")
+@click.option(
+ "--nucypher-image",
+ help="The docker image containing the nucypher code to run on the remote nodes.",
+ default=None,
+)
+@click.option(
+ "--namespace",
+ help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The Nucypher network name these hosts will run on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--include-host",
+ "include_hosts",
+ help="specify hosts to update",
+ multiple=True,
+ type=click.STRING,
+)
+@click.option(
+ "--env",
+ "-e",
+ "envvars",
+ help="environment variables (ENVVAR=VALUE)",
+ multiple=True,
+ type=click.STRING,
+ default=[],
+)
+@click.option(
+ "--cli",
+ "-c",
+ "cliargs",
+ help="cli arguments for 'nucypher run': eg.'--max-gas-price 50'/'--c max-gas-price=50'",
+ multiple=True,
+ type=click.STRING,
+ default=[],
+)
+@click.option(
+ "--eth-endpoint",
+ help="The ethereum blockchain provider for the remote node.",
+ default=None,
+)
+@click.option(
+ "--polygon-endpoint",
+ help="The polygon L2 blockchain provider for the remote node.",
+ default=None,
+)
+def update(
+ nucypher_image,
+ namespace,
+ network,
+ include_hosts,
+ envvars,
+ cliargs,
+ eth_endpoint,
+ polygon_endpoint,
+):
"""Update images and change cli/env options on already running hosts"""
- deployer = CloudDeployers.get_deployer('generic')(emitter,
- namespace=namespace,
- network=network,
- envvars=envvars,
- cliargs=cliargs,
- resource_name='nucypher',
- docker_image=nucypher_image,
- eth_endpoint=eth_endpoint,
- polygon_endpoint=polygon_endpoint
- )
-
- hostnames = deployer.config['instances'].keys()
+ deployer = CloudDeployers.get_deployer("generic")(
+ emitter,
+ namespace=namespace,
+ network=network,
+ envvars=envvars,
+ cliargs=cliargs,
+ resource_name="nucypher",
+ docker_image=nucypher_image,
+ eth_endpoint=eth_endpoint,
+ polygon_endpoint=polygon_endpoint,
+ )
+
+ hostnames = deployer.config["instances"].keys()
if include_hosts:
hostnames = include_hosts
- for name, hostdata in [(n, d) for n, d in deployer.config['instances'].items() if n in hostnames]:
+ for name, hostdata in [
+ (n, d) for n, d in deployer.config["instances"].items() if n in hostnames
+ ]:
emitter.echo(f'\t{name}: {hostdata["publicaddress"]}', color="yellow")
- os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
+ os.environ["ANSIBLE_HOST_KEY_CHECKING"] = "False"
deployer.update_nucypher_on_existing_nodes(hostnames)
-@cli.command('status')
-@click.option('--fast', help="Only call blockchain and http methods, skip ssh into each node", default=None, is_flag=True)
-@click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING, default=DEFAULT_NETWORK)
-@click.option('--include-host', 'include_hosts', help="Peform this operation on only the named hosts", multiple=True, type=click.STRING)
+@cli.command("status")
+@click.option(
+ "--fast",
+ help="Only call blockchain and http methods, skip ssh into each node",
+ default=None,
+ is_flag=True,
+)
+@click.option(
+ "--namespace",
+ help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The Nucypher network name these hosts will run on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--include-host",
+ "include_hosts",
+ help="Peform this operation on only the named hosts",
+ multiple=True,
+ type=click.STRING,
+)
def status(fast, namespace, network, include_hosts):
"""Displays ursula status and updates worker data in stakeholder config"""
- deployer = CloudDeployers.get_deployer('generic')(
- emitter, namespace=namespace, network=network)
+ deployer = CloudDeployers.get_deployer("generic")(
+ emitter, namespace=namespace, network=network
+ )
- hostnames = deployer.config['instances'].keys()
+ hostnames = deployer.config["instances"].keys()
if include_hosts:
hostnames = include_hosts
deployer.get_worker_status(hostnames, fast=fast)
-@cli.command('fund')
-@click.option('--amount', help="The amount to fund each node. Default is .003", type=click.FLOAT, default=.003)
-@click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING, default=DEFAULT_NETWORK)
-@click.option('--include-host', 'include_hosts', help="Peform this operation on only the named hosts", multiple=True, type=click.STRING)
+@cli.command("fund")
+@click.option(
+ "--amount",
+ help="The amount to fund each node. Default is .003",
+ type=click.FLOAT,
+ default=0.003,
+)
+@click.option(
+ "--namespace",
+ help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The Nucypher network name these hosts will run on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--include-host",
+ "include_hosts",
+ help="Peform this operation on only the named hosts",
+ multiple=True,
+ type=click.STRING,
+)
def fund(amount, namespace, network, include_hosts):
"""
fund remote nodes automatically using a locally managed burner wallet
"""
-
- deployer = CloudDeployers.get_deployer('generic')(emitter, namespace=namespace, network=network)
+
+ deployer = CloudDeployers.get_deployer("generic")(
+ emitter, namespace=namespace, network=network
+ )
if deployer.has_wallet:
- if password := os.getenv('NUCYPHER_OPS_LOCAL_ETH_PASSWORD'):
+ if password := os.getenv("NUCYPHER_OPS_LOCAL_ETH_PASSWORD"):
emitter.echo("found local eth password in environment variable")
else:
- password = click.prompt('Please enter the wallet password you saved for this account', hide_input=True)
+ password = click.prompt(
+ "Please enter the wallet password you saved for this account",
+ hide_input=True,
+ )
else:
emitter.echo("Creating a new wallet to fund your nodes.")
- if password := os.getenv('NUCYPHER_OPS_LOCAL_ETH_PASSWORD'):
+ if password := os.getenv("NUCYPHER_OPS_LOCAL_ETH_PASSWORD"):
emitter.echo("found local eth password in environment variable")
- else:
- password = click.prompt('please enter a password for this new eth wallet', hide_input=True)
- passwordagain = click.prompt('please enter the same password again', hide_input=True)
+ else:
+ password = click.prompt(
+ "please enter a password for this new eth wallet", hide_input=True
+ )
+ passwordagain = click.prompt(
+ "please enter the same password again", hide_input=True
+ )
if not password == passwordagain:
raise AttributeError("passwords dont' match please try again.")
@@ -145,150 +327,300 @@ def fund(amount, namespace, network, include_hosts):
balance = deployer.get_wallet_balance(wallet.address, eth=True)
emitter.echo(f"balance: {deployer.get_wallet_balance(wallet.address)}")
- hostnames = deployer.config['instances'].keys()
+ hostnames = deployer.config["instances"].keys()
if include_hosts:
hostnames = include_hosts
emitter.echo(f"funding {len(hostnames)} nodes with {amount} ETH each.")
if balance < amount * len(hostnames):
- emitter.echo(f"balance on local wallet ({balance} ETH) is not enough to fund {len(hostnames)} with {amount} ETH. Add more funds to local wallet ({wallet.address})")
+ emitter.echo(
+ f"balance on local wallet ({balance} ETH) is not enough to fund {len(hostnames)} with {amount} ETH. Add more funds to local wallet ({wallet.address})"
+ )
return
deployer.fund_nodes(wallet, hostnames, amount)
-@cli.command('defund')
-@click.option('--amount', help="The amount to defund. Default is the entire balance of the node's wallet.", type=click.FLOAT, default=None)
-@click.option('--to-address', help="To which ETH address are you sending the proceeds?", required=True)
-@click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING, default=DEFAULT_NETWORK)
-@click.option('--include-host', 'include_hosts', help="Peform this operation on only the named hosts", multiple=True, type=click.STRING)
+@cli.command("defund")
+@click.option(
+ "--amount",
+ help="The amount to defund. Default is the entire balance of the node's wallet.",
+ type=click.FLOAT,
+ default=None,
+)
+@click.option(
+ "--to-address",
+ help="To which ETH address are you sending the proceeds?",
+ required=True,
+)
+@click.option(
+ "--namespace",
+ help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The Nucypher network name these hosts will run on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--include-host",
+ "include_hosts",
+ help="Peform this operation on only the named hosts",
+ multiple=True,
+ type=click.STRING,
+)
def defund(amount, to_address, namespace, network, include_hosts):
"""Transfer remaining ETH balance from operator address to another address"""
- deployer = CloudDeployers.get_deployer('generic')(emitter, namespace=namespace, network=network)
+ deployer = CloudDeployers.get_deployer("generic")(
+ emitter, namespace=namespace, network=network
+ )
- hostnames = deployer.config['instances'].keys()
+ hostnames = deployer.config["instances"].keys()
if include_hosts:
hostnames = include_hosts
deployer.defund_nodes(hostnames, to=to_address, amount=amount)
-@cli.command('show-backupdir')
-@click.option('--verbose', '-v', help="include node nick names", is_flag=True)
-@click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING, default=DEFAULT_NETWORK)
+@cli.command("show-backupdir")
+@click.option("--verbose", "-v", help="include node nick names", is_flag=True)
+@click.option(
+ "--namespace",
+ help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The Nucypher network name these hosts will run on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
def backupdir(verbose, namespace, network):
"""Display backup directory for hosts within network and namespace"""
- deployer = CloudDeployers.get_deployer('generic')(emitter, namespace=namespace, network=network)
- hostnames = deployer.config['instances'].keys()
- for hostname in hostnames:
- prefix = f'{hostname}:' if verbose else ''
- emitter.echo(f'{prefix} {deployer.get_backup_path_by_nickname(hostname)}')
-
-
-@cli.command('restore')
-@click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING, default=DEFAULT_NETWORK)
-@click.option('--target-host', 'target_host', help="The nickname of the host where we are putting the restored state. (try `nucypher-ops nodes list` )", multiple=False, type=click.STRING)
-@click.option('--source-path', 'source_path', help="The absolute path on disk to the backup data you are restoring", type=click.STRING, required=False)
-@click.option('--source-nickname', 'source_nickname', help="The nickname of the node whose data you are moving to the new machine", type=click.STRING, required=False)
+ deployer = CloudDeployers.get_deployer("generic")(
+ emitter, namespace=namespace, network=network
+ )
+ hostnames = deployer.config["instances"].keys()
+ for hostname in hostnames:
+ prefix = f"{hostname}:" if verbose else ""
+ emitter.echo(f"{prefix} {deployer.get_backup_path_by_nickname(hostname)}")
+
+
+@cli.command("restore")
+@click.option(
+ "--namespace",
+ help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The Nucypher network name these hosts will run on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--target-host",
+ "target_host",
+ help="The nickname of the host where we are putting the restored state. (try `nucypher-ops nodes list` )",
+ multiple=False,
+ type=click.STRING,
+)
+@click.option(
+ "--source-path",
+ "source_path",
+ help="The absolute path on disk to the backup data you are restoring",
+ type=click.STRING,
+ required=False,
+)
+@click.option(
+ "--source-nickname",
+ "source_nickname",
+ help="The nickname of the node whose data you are moving to the new machine",
+ type=click.STRING,
+ required=False,
+)
def restore(namespace, network, target_host, source_path, source_nickname):
"""Restores a backup of a worker to an existing host"""
if not source_path and not source_nickname:
- emitter.echo("You must either specify the path to a backup on disk (ie. `/Users/Alice/Library/Application Support/nucypher-ops/configs/), or the name of an existing ursula config (ie. `mainnet-nucypher-1`")
+ emitter.echo(
+ "You must either specify the path to a backup on disk (ie. `/Users/Alice/Library/Application Support/nucypher-ops/configs/),"
+ " or the name of an existing ursula config (ie. `mainnet-nucypher-1`"
+ )
- deployer = CloudDeployers.get_deployer('generic')(emitter, namespace=namespace, network=network)
+ deployer = CloudDeployers.get_deployer("generic")(
+ emitter, namespace=namespace, network=network
+ )
deployer.restore_from_backup(target_host, source_path)
-@cli.command('backup')
-@click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The Nucypher network name these hosts are running on.", type=click.STRING, default=DEFAULT_NETWORK)
-@click.option('--include-host', help="The nickname of the host to backup", multiple=False, type=click.STRING, required=False)
+@cli.command("backup")
+@click.option(
+ "--namespace",
+ help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The Nucypher network name these hosts are running on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--include-host",
+ help="The nickname of the host to backup",
+ multiple=False,
+ type=click.STRING,
+ required=False,
+)
def backup(namespace, network, include_host):
"""Stores a backup of a worker running on an existing host"""
- deployer = CloudDeployers.get_deployer('generic')(emitter, namespace=namespace, network=network)
+ deployer = CloudDeployers.get_deployer("generic")(
+ emitter, namespace=namespace, network=network
+ )
if include_host:
hostnames = [include_host]
else:
- hostnames = deployer.config['instances'].keys()
+ hostnames = deployer.config["instances"].keys()
deployer.backup_remote_data(node_names=hostnames)
-@cli.command('stop')
-@click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--network', help="The Nucypher network name these hosts are running on.", type=click.STRING, default=DEFAULT_NETWORK)
-@click.option('--include-host', help="The nickname of the host to backup", multiple=False, type=click.STRING, required=False)
+@cli.command("stop")
+@click.option(
+ "--namespace",
+ help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--network",
+ help="The Nucypher network name these hosts are running on.",
+ type=click.STRING,
+ default=DEFAULT_NETWORK,
+)
+@click.option(
+ "--include-host",
+ help="The nickname of the host to backup",
+ multiple=False,
+ type=click.STRING,
+ required=False,
+)
def stop(namespace, network, include_host):
"""Stop worker running on an existing host"""
- deployer = CloudDeployers.get_deployer('generic')(emitter, namespace=namespace, network=network)
+ deployer = CloudDeployers.get_deployer("generic")(
+ emitter, namespace=namespace, network=network
+ )
if include_host:
hostnames = [include_host]
else:
- hostnames = deployer.config['instances'].keys()
+ hostnames = deployer.config["instances"].keys()
deployer.stop_worker_process(node_names=hostnames)
-@cli.command('recover-node-config')
-@click.option('--include-host', 'include_hosts', help="specify hosts to recover", multiple=True, required=True, type=click.STRING)
-@click.option('--provider', help="The cloud provider host(s) are running on", multiple=False, required=True, type=click.Choice(['digitalocean', 'aws']))
-@click.option('--aws-profile', help="The AWS profile name to use when interacting with remote node", required=False)
-@click.option('--namespace', help="Namespace for these operations", type=click.STRING, default=DEFAULT_NAMESPACE)
-@click.option('--login-name', help="The name username of a user with root privileges we can ssh as on the host.", default="root")
-@click.option('--key-path', 'ssh_key_path', help="The path to a keypair we will need to ssh into this host (default: ~/.ssh/id_rsa)", default="~/.ssh/id_rsa")
-@click.option('--ssh-port', help="The port this host's ssh daemon is listening on (default: 22)", default=22)
-def recover_node_config(include_hosts, namespace, provider, aws_profile, login_name, ssh_key_path, ssh_port):
+@cli.command("recover-node-config")
+@click.option(
+ "--include-host",
+ "include_hosts",
+ help="specify hosts to recover",
+ multiple=True,
+ required=True,
+ type=click.STRING,
+)
+@click.option(
+ "--provider",
+ help="The cloud provider host(s) are running on",
+ multiple=False,
+ required=True,
+ type=click.Choice(["digitalocean", "aws"]),
+)
+@click.option(
+ "--aws-profile",
+ help="The AWS profile name to use when interacting with remote node",
+ required=False,
+)
+@click.option(
+ "--namespace",
+ help="Namespace for these operations",
+ type=click.STRING,
+ default=DEFAULT_NAMESPACE,
+)
+@click.option(
+ "--login-name",
+ help="The name username of a user with root privileges we can ssh as on the host.",
+ default="root",
+)
+@click.option(
+ "--key-path",
+ "ssh_key_path",
+ help="The path to a keypair we will need to ssh into this host (default: ~/.ssh/id_rsa)",
+ default="~/.ssh/id_rsa",
+)
+@click.option(
+ "--ssh-port",
+ help="The port this host's ssh daemon is listening on (default: 22)",
+ default=22,
+)
+def recover_node_config(
+ include_hosts, namespace, provider, aws_profile, login_name, ssh_key_path, ssh_port
+):
"""Regenerate previously lost/deleted node config(s)"""
- if (provider == 'aws') ^ bool(aws_profile):
- raise click.BadOptionUsage('--aws-profile', f"Expected both '--aws-profile ' and '--provider aws' to be specified; got ({aws_profile}, {provider})")
- if provider == 'aws' and login_name != 'ubuntu':
- result = emitter.confirm(f"When using AWS the expectation is that the login name would be 'ubuntu' and not '{login_name}'. Are you sure you want to continue using '{login_name}'?")
+ if (provider == "aws") ^ bool(aws_profile):
+ raise click.BadOptionUsage(
+ "--aws-profile",
+ f"Expected both '--aws-profile ' and '--provider aws' to be specified; got ({aws_profile}, {provider})",
+ )
+ if provider == "aws" and login_name != "ubuntu":
+ result = emitter.confirm(
+ f"When using AWS the expectation is that the login name would be 'ubuntu' and not '{login_name}'. Are you sure you want to continue using '{login_name}'?"
+ )
if not result:
- raise click.BadOptionUsage('--login-name', "Incorrect use of '--login-name'")
+ raise click.BadOptionUsage(
+ "--login-name", "Incorrect use of '--login-name'"
+ )
- playbook = Path(PLAYBOOKS).joinpath('recover_ursula_ops_data.yml')
+ playbook = Path(PLAYBOOKS).joinpath("recover_ursula_ops_data.yml")
instance_capture = {
- 'InstanceId': [],
- 'publicaddress': [],
- 'installed': [],
- 'host_nickname': [],
- 'eth_endpoint': [],
- 'polygon_endpoint': [],
- 'docker_image': [],
- 'operator address': [],
- 'nickname': [],
- 'rest url': [],
-
+ "InstanceId": [],
+ "publicaddress": [],
+ "installed": [],
+ "host_nickname": [],
+ "eth_endpoint": [],
+ "polygon_endpoint": [],
+ "docker_image": [],
+ "operator address": [],
+ "nickname": [],
+ "rest url": [],
# non-instance dictionary data
- '_ssh-fingerprint': [],
- '_instance-region': [],
- '_domain': [],
- '_keystore-password': [],
- '_operator-password': [],
-
+ "_ssh-fingerprint": [],
+ "_instance-region": [],
+ "_domain": [],
+ "_keystore-password": [],
+ "_operator-password": [],
# values need further processing
- '.cli-args': [],
+ ".cli-args": [],
}
- inventory_host_list = '{},'.format(",".join(include_hosts))
+ inventory_host_list = "{},".format(",".join(include_hosts))
loader = DataLoader()
- inventory = InventoryManager(
- loader=loader, sources=inventory_host_list)
+ inventory = InventoryManager(loader=loader, sources=inventory_host_list)
hosts = inventory.get_hosts()
for host in hosts:
- host.set_variable('ansible_ssh_private_key_file', ssh_key_path)
- host.set_variable('default_user', login_name)
- host.set_variable('ansible_port', ssh_port)
- host.set_variable('ansible_connection', 'ssh')
- host.set_variable('cloud_provider', provider) # aws / digital ocean
+ host.set_variable("ansible_ssh_private_key_file", ssh_key_path)
+ host.set_variable("default_user", login_name)
+ host.set_variable("ansible_port", ssh_port)
+ host.set_variable("ansible_connection", "ssh")
+ host.set_variable("cloud_provider", provider) # aws / digital ocean
callback = AnsiblePlayBookResultsCollector(
- sock=emitter,
- return_results=instance_capture
+ sock=emitter, return_results=instance_capture
)
variable_manager = VariableManager(loader=loader, inventory=inventory)
@@ -306,27 +638,31 @@ def recover_node_config(include_hosts, namespace, provider, aws_profile, login_n
#
# Process data capture
# 1. remove namespace metadata; keys that start with '_'
- comparator_address_data = compare_and_remove_common_namespace_data(instance_capture, include_hosts)
- network = comparator_address_data['_domain']
+ comparator_address_data = compare_and_remove_common_namespace_data(
+ instance_capture, include_hosts
+ )
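+ # the network/domain is taken from the recovered node data rather than from a CLI option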
+ network = comparator_address_data["_domain"]
# 2. add deploy attributes
- add_deploy_attributes(instance_capture, include_hosts, ssh_key_path, login_name, ssh_port)
+ add_deploy_attributes(
+ instance_capture, include_hosts, ssh_key_path, login_name, ssh_port
+ )
pre_config_metadata = {
- "namespace": f'{namespace}-{network}',
- "keystorepassword": comparator_address_data['_keystore-password'],
- "ethpassword": comparator_address_data['_operator-password'],
+ "namespace": f"{namespace}-{network}",
+ "keystorepassword": comparator_address_data["_keystore-password"],
+ "ethpassword": comparator_address_data["_operator-password"],
"keystoremnemonic": "N/A (recovery mode)",
- "sshkey": comparator_address_data['_ssh-fingerprint'],
+ "sshkey": comparator_address_data["_ssh-fingerprint"],
}
# 3. remove/process metadata that needs some additional processing; keys that start with '.' eg. .cli-args
ignore_set = {"ursula", "run", "--network", network}
- cli_args = instance_capture.pop('.cli-args')
+ cli_args = instance_capture.pop(".cli-args")
for instance_address, runtime_args in cli_args:
# check for seed node
- if '--lonely' in runtime_args:
- pre_config_metadata['seed_node'] = instance_address
+ if "--lonely" in runtime_args:
+ pre_config_metadata["seed_node"] = instance_address
ursula_runtime_args = runtime_args.split(",")
index = 0
@@ -338,10 +674,12 @@ def recover_node_config(include_hosts, namespace, provider, aws_profile, login_n
if arg.startswith("--"):
# either single value like `--debug` or paired value like `--max-gas-price 50`
arg_key = arg[2:]
- arg_value = "" # single value
- if (index+1) < num_runtime_args and not ursula_runtime_args[index+1].startswith('--'):
+ arg_value = "" # single value
+ if (index + 1) < num_runtime_args and not ursula_runtime_args[
+ index + 1
+ ].startswith("--"):
# paired value
- arg_value = ursula_runtime_args[index+1]
+ arg_value = ursula_runtime_args[index + 1]
index += 1
addtional_args[arg_key] = arg_value
index += 1
@@ -350,37 +688,37 @@ def recover_node_config(include_hosts, namespace, provider, aws_profile, login_n
if not ursula_cli_args:
instance_capture["runtime_cliargs"] = ursula_cli_args
- ursula_cli_args.append(
- (instance_address, addtional_args)
- )
+ ursula_cli_args.append((instance_address, addtional_args))
# 4. Recover config
- region = comparator_address_data['_instance-region']
- if provider == 'digitalocean':
- pre_config_metadata['digital-ocean-region'] = region
+ region = comparator_address_data["_instance-region"]
+ if provider == "digitalocean":
+ pre_config_metadata["digital-ocean-region"] = region
else:
- aws_config_data = collect_aws_pre_config_data(aws_profile, region, include_hosts[0], ssh_key_path)
+ aws_config_data = collect_aws_pre_config_data(
+ aws_profile, region, include_hosts[0], ssh_key_path
+ )
pre_config_metadata.update(aws_config_data)
# set up pre-config instances
node_names = []
instances_dict = {}
- if provider == 'aws':
+ if provider == "aws":
# must update 'nickname' and 'host_nickname' entries - aws nicknames are local
- instance_capture['nickname'].clear()
+ instance_capture["nickname"].clear()
- old_host_nicknames = instance_capture.pop('host_nickname')
- instance_capture['host_nickname'] = []
+ old_host_nicknames = instance_capture.pop("host_nickname")
+ instance_capture["host_nickname"] = []
for ip_address, host_nickname in old_host_nicknames:
- if provider == 'aws':
+ if provider == "aws":
# update nickname for AWS
instance_info = get_aws_instance_info(aws_profile, region, ip_address)
- host_nickname = instance_info['Tags'][0]['Value']
- instance_capture['nickname'].append((ip_address, host_nickname))
+ host_nickname = instance_info["Tags"][0]["Value"]
+ instance_capture["nickname"].append((ip_address, host_nickname))
# either update for AWS or leave the same for DigitalOcean
- instance_capture['host_nickname'].append((ip_address, host_nickname))
+ instance_capture["host_nickname"].append((ip_address, host_nickname))
instances_dict[host_nickname] = {
"publicaddress": ip_address,
"installed": ["ursula"],
@@ -397,7 +735,7 @@ def recover_node_config(include_hosts, namespace, provider, aws_profile, login_n
namespace=namespace,
network=network,
pre_config=pre_config_metadata,
- resource_name='nucypher',
+ resource_name="nucypher",
)
# regenerate instance configuration file
diff --git a/nucypher_ops/constants.py b/nucypher_ops/constants.py
index da6e26b..d035298 100644
--- a/nucypher_ops/constants.py
+++ b/nucypher_ops/constants.py
@@ -1,10 +1,12 @@
import os
-from appdirs import AppDirs
from pathlib import Path
+from appdirs import AppDirs
+
APP_DIR = AppDirs("nucypher-ops")
DEFAULT_CONFIG_ROOT = Path(
- os.getenv('NUCYPHER_OPS_CONFIG_ROOT', default=APP_DIR.user_data_dir))
+ os.getenv("NUCYPHER_OPS_CONFIG_ROOT", default=APP_DIR.user_data_dir)
+)
MAINNET = 1
ROPSTEN = 3
@@ -21,23 +23,23 @@
RINKEBY: "Rinkeby",
GOERLI: "Goerli",
POLYGON_MAINNET: "Polygon/Mainnet",
- POLYGON_MUMBAI: "Polygon/Mumbai"
+ POLYGON_MUMBAI: "Polygon/Mumbai",
}
REVERSE_LOOKUP_CHAIN_NAMES = {v: k for k, v in CHAIN_NAMES.items()}
NETWORKS = {
- 'mainnet': {'policy': MAINNET, 'payment': POLYGON_MAINNET},
- 'lynx': {'policy': GOERLI, 'payment': POLYGON_MUMBAI},
- 'tapir': {'policy': GOERLI, 'payment': POLYGON_MUMBAI},
- 'test': {'policy': GOERLI, 'payment': POLYGON_MUMBAI},
- 'oryx': {'policy': GOERLI, 'payment': POLYGON_MAINNET}
+ "mainnet": {"policy": MAINNET, "payment": POLYGON_MAINNET},
+ "lynx": {"policy": GOERLI, "payment": POLYGON_MUMBAI},
+ "tapir": {"policy": GOERLI, "payment": POLYGON_MUMBAI},
+ "test": {"policy": GOERLI, "payment": POLYGON_MUMBAI},
+ "oryx": {"policy": GOERLI, "payment": POLYGON_MAINNET},
}
BASE_DIR = os.path.dirname(__file__)
-PLAYBOOKS = os.path.join(BASE_DIR, 'playbooks')
-TEMPLATES = os.path.join(BASE_DIR, 'templates')
+PLAYBOOKS = os.path.join(BASE_DIR, "playbooks")
+TEMPLATES = os.path.join(BASE_DIR, "templates")
# Environment variable names
NUCYPHER_ENVVAR_KEYSTORE_PASSWORD = "NUCYPHER_KEYSTORE_PASSWORD"
@@ -45,5 +47,5 @@
NUCYPHER_ENVVAR_OPERATOR_ETHEREUM_PASSWORD = "NUCYPHER_OPERATOR_ETH_PASSWORD"
NUCYPHER_ENVVAR_PROVIDER_URI = "NUCYPHER_PROVIDER_URI"
-DEFAULT_NAMESPACE = os.getenv('NUCYPHER_OPS_DEFAULT_NAMESPACE', 'nucypher')
-DEFAULT_NETWORK = os.getenv('NUCYPHER_OPS_DEFAULT_NETWORK', 'mainnet')
+DEFAULT_NAMESPACE = os.getenv("NUCYPHER_OPS_DEFAULT_NAMESPACE", "nucypher")
+DEFAULT_NETWORK = os.getenv("NUCYPHER_OPS_DEFAULT_NETWORK", "mainnet")
diff --git a/nucypher_ops/ops/ansible_utils.py b/nucypher_ops/ops/ansible_utils.py
index 3bd0f4b..10ff692 100644
--- a/nucypher_ops/ops/ansible_utils.py
+++ b/nucypher_ops/ops/ansible_utils.py
@@ -1,22 +1,16 @@
import re
-from ansible.plugins.callback import CallbackBase
+
from ansible import context as ansible_context
from ansible.module_utils.common.collections import ImmutableDict
+from ansible.plugins.callback import CallbackBase
ansible_context.CLIARGS = ImmutableDict(
- {
- 'syntax': False,
- 'start_at_task': None,
- 'verbosity': 0,
- 'become_method': 'sudo'
- }
+ {"syntax": False, "start_at_task": None, "verbosity": 0, "become_method": "sudo"}
)
class AnsiblePlayBookResultsCollector(CallbackBase):
- """
-
- """
+ """ """
def __init__(self, sock, *args, return_results=None, filter_output=None, **kwargs):
super().__init__(*args, **kwargs)
@@ -30,89 +24,87 @@ def v2_playbook_on_play_start(self, play):
return
name = play.get_name().strip()
if not name:
- msg = '\nPLAY {}\n'.format('*' * 100)
+ msg = "\nPLAY {}\n".format("*" * 100)
else:
- msg = '\nPLAY [{}] {}\n'.format(name, '*' * 100)
+ msg = "\nPLAY [{}] {}\n".format(name, "*" * 100)
self.send_save(msg)
def v2_playbook_on_task_start(self, task, is_conditional):
-
if self.filter_output is not None:
return
- if task.get_name() == 'Gathering Facts':
+ if task.get_name() == "Gathering Facts":
return
- msg = '\nTASK [{}] {}\n'.format(task.get_name(), '*' * 100)
+ msg = "\nTASK [{}] {}\n".format(task.get_name(), "*" * 100)
self.send_save(msg)
def v2_runner_on_ok(self, result, *args, **kwargs):
task_name = result._task.get_name()
- if self.filter_output is not None and not task_name in self.filter_output:
+ if self.filter_output is not None and task_name not in self.filter_output:
return
if self.filter_output is None:
if result.is_changed():
- data = '[{}]=> changed'.format(result._host.name)
+ data = "[{}]=> changed".format(result._host.name)
else:
- data = '[{}]=> ok'.format(result._host.name)
+ data = "[{}]=> ok".format(result._host.name)
+ self.send_save(data, color="yellow" if result.is_changed() else "green")
+ if "msg" in result._task_fields["args"]:
+ self.send_save("\n")
+ msg = result._task_fields["args"]["msg"]
self.send_save(
- data, color='yellow' if result.is_changed() else 'green')
- if 'msg' in result._task_fields['args']:
- self.send_save('\n')
- msg = result._task_fields['args']['msg']
- self.send_save(msg, color='white',)
+ msg,
+ color="white",
+ )
if self.results:
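+ # scan the task output for "<key>: <value>" lines and record the captured values per host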
for k in self.results.keys():
- regex = fr'{k}:\s*(?P<data>.*)'
+ regex = rf"{k}:\s*(?P.*)"
match = re.search(regex, msg, flags=re.MULTILINE)
if match:
self.results[k].append(
- (result._host.name, match.groupdict()['data']))
+ (result._host.name, match.groupdict()["data"])
+ )
def v2_runner_on_failed(self, result, *args, **kwargs):
if self.filter_output is not None:
return
- if 'changed' in result._result:
- del result._result['changed']
- data = 'fail: [{}]=> {}: {}'.format(
- result._host.name, 'failed',
- self._dump_results(result._result)
+ if "changed" in result._result:
+ del result._result["changed"]
+ data = "fail: [{}]=> {}: {}".format(
+ result._host.name, "failed", self._dump_results(result._result)
)
- self.send_save(data, color='red')
+ self.send_save(data, color="red")
def v2_runner_on_unreachable(self, result):
- if 'changed' in result._result:
- del result._result['changed']
- data = '[{}]=> {}: {}'.format(
- result._host.name,
- 'unreachable',
- self._dump_results(result._result)
+ if "changed" in result._result:
+ del result._result["changed"]
+ data = "[{}]=> {}: {}".format(
+ result._host.name, "unreachable", self._dump_results(result._result)
)
self.send_save(data)
def v2_runner_on_skipped(self, result):
if self.filter_output is not None:
return
- if 'changed' in result._result:
- del result._result['changed']
- data = '[{}]=> {}: {}'.format(
- result._host.name,
- 'skipped',
- self._dump_results(result._result)
+ if "changed" in result._result:
+ del result._result["changed"]
+ data = "[{}]=> {}: {}".format(
+ result._host.name, "skipped", self._dump_results(result._result)
)
- self.send_save(data, color='blue')
+ self.send_save(data, color="blue")
def v2_playbook_on_stats(self, stats):
if self.filter_output is not None:
return
hosts = sorted(stats.processed.keys())
- data = '\nPLAY RECAP {}\n'.format('*' * 100)
+ data = "\nPLAY RECAP {}\n".format("*" * 100)
self.send_save(data)
for h in hosts:
s = stats.summarize(h)
- msg = '{} : ok={} changed={} unreachable={} failed={} skipped={}'.format(
- h, s['ok'], s['changed'], s['unreachable'], s['failures'], s['skipped'])
+ msg = "{} : ok={} changed={} unreachable={} failed={} skipped={}".format(
+ h, s["ok"], s["changed"], s["unreachable"], s["failures"], s["skipped"]
+ )
self.send_save(msg)
def send_save(self, data, color=None):
diff --git a/nucypher_ops/ops/contracts.py b/nucypher_ops/ops/contracts.py
index 2adf681..f16068d 100644
--- a/nucypher_ops/ops/contracts.py
+++ b/nucypher_ops/ops/contracts.py
@@ -1,4 +1,4 @@
-from typing import Dict, Union
+from typing import Dict
import requests
@@ -10,12 +10,12 @@ class TACoContractRegistry:
"""
_PUBLICATION_REPO = "nucypher/nucypher"
- _BASE_URL = f'https://raw.githubusercontent.com/{_PUBLICATION_REPO}'
+ _BASE_URL = f"https://raw.githubusercontent.com/{_PUBLICATION_REPO}"
name = "GitHub Registry Source"
is_primary = True
- def __init__(self, domain='mainnet'):
+ def __init__(self, domain="mainnet"):
self.domain = domain
def get_publication_endpoint(self) -> str:
@@ -28,11 +28,13 @@ def fetch_latest_publication(self) -> Dict:
try:
# Fetch
response = requests.get(publication_endpoint)
- except requests.exceptions.ConnectionError as e:
+ except requests.exceptions.ConnectionError:
raise
if response.status_code != 200:
- raise AttributeError(f"No registry found at {self.get_publication_endpoint()}")
+ raise AttributeError(
+ f"No registry found at {self.get_publication_endpoint()}"
+ )
registry_data = response.json()
return registry_data
diff --git a/nucypher_ops/ops/fleet_ops.py b/nucypher_ops/ops/fleet_ops.py
index 80a533b..b43bc14 100644
--- a/nucypher_ops/ops/fleet_ops.py
+++ b/nucypher_ops/ops/fleet_ops.py
@@ -17,8 +17,13 @@
from mako.template import Template
from nucypher_ops.constants import (
- CHAIN_NAMES, DEFAULT_CONFIG_ROOT, NETWORKS, NUCYPHER_ENVVAR_KEYSTORE_PASSWORD,
- NUCYPHER_ENVVAR_OPERATOR_ETHEREUM_PASSWORD, PLAYBOOKS, TEMPLATES,
+ CHAIN_NAMES,
+ DEFAULT_CONFIG_ROOT,
+ NETWORKS,
+ NUCYPHER_ENVVAR_KEYSTORE_PASSWORD,
+ NUCYPHER_ENVVAR_OPERATOR_ETHEREUM_PASSWORD,
+ PLAYBOOKS,
+ TEMPLATES,
)
from nucypher_ops.ops import keygen
from nucypher_ops.ops.ansible_utils import AnsiblePlayBookResultsCollector
@@ -34,7 +39,7 @@
except ModuleNotFoundError:
pass
-NODE_CONFIG_STORAGE_KEY = 'configs'
+NODE_CONFIG_STORAGE_KEY = "configs"
def needs_provider(method):
@@ -42,64 +47,74 @@ def inner(self, *args, **kwargs):
provider = self.get_local_blockchain_provider()
try:
import web3
- except ModuleNotFoundError as e:
+ except ModuleNotFoundError:
raise ImportError(
- "web3 must be installed to use this functionality ('pip install web3')")
+ "web3 must be installed to use this functionality ('pip install web3')"
+ )
w3 = web3.Web3(web3.Web3.HTTPProvider(provider))
return method(self, w3, *args, **kwargs)
+
return inner
def needs_registry(method):
def inner(self, *args, **kwargs):
if self.contract_registry is None:
- chain_id = NETWORKS[self.network]['policy']
+ chain_id = NETWORKS[self.network]["policy"]
registry = TACoContractRegistry(domain=self.network)
latest_publication = registry.fetch_latest_publication()[str(chain_id)]
- self.contract_registry = {name: (
- info["address"], info["abi"]) for name, info in latest_publication.items()}
+ self.contract_registry = {
+ name: (info["address"], info["abi"])
+ for name, info in latest_publication.items()
+ }
return method(self, self.contract_registry, *args, **kwargs)
+
return inner
class BaseCloudNodeConfigurator:
-
- NAMESSPACE_CREATE_ACTIONS = ['add', 'create', 'copy']
- application = 'ursula'
+ NAMESSPACE_CREATE_ACTIONS = ["add", "create", "copy"]
+ application = "ursula"
required_fields = [
- 'eth_endpoint',
- 'polygon_endpoint',
- 'docker_image',
+ "eth_endpoint",
+ "polygon_endpoint",
+ "docker_image",
]
host_level_override_prompts = {
- 'eth_endpoint': {"prompt": "--eth-endpoint: please provide the url of a hosted ethereum node (infura/geth) which your nodes can access", "choices": None},
- 'polygon_endpoint': {"prompt": "--polygon-endpoint: please provide the url of a hosted level-two node (infura/bor) which your nodes can access", "choices": None},
+ "eth_endpoint": {
+ "prompt": "--eth-endpoint: please provide the url of a hosted ethereum node (infura/geth) which your nodes can access",
+ "choices": None,
+ },
+ "polygon_endpoint": {
+ "prompt": "--polygon-endpoint: please provide the url of a hosted level-two node (infura/bor) which your nodes can access",
+ "choices": None,
+ },
}
output_capture = {
- 'operator address': [],
- 'rest url': [],
- 'nucypher version': [],
- 'nickname': []
+ "operator address": [],
+ "rest url": [],
+ "nucypher version": [],
+ "nickname": [],
}
- def __init__(self, # TODO: Add type annotations
- emitter,
- seed_network=None,
- recovery_mode = False,
- pre_config=False,
- network=None,
- namespace=None,
- action=None,
- envvars=None,
- cliargs=None,
- resource_name=None,
- eth_endpoint=None,
- docker_image=None,
- **kwargs
- ):
-
+ def __init__(
+ self, # TODO: Add type annotations
+ emitter,
+ seed_network=None,
+ recovery_mode=False,
+ pre_config=False,
+ network=None,
+ namespace=None,
+ action=None,
+ envvars=None,
+ cliargs=None,
+ resource_name=None,
+ eth_endpoint=None,
+ docker_image=None,
+ **kwargs,
+ ):
self.emitter = emitter
self.network = network
self.namespace = namespace
@@ -111,66 +126,71 @@ def __init__(self, # TODO: Add type annotations
self.envvars = envvars or []
if self.envvars:
- if not all([(len(v.split('=', maxsplit=1)) == 2) for v in self.envvars]):
+ if not all([(len(v.split("=", maxsplit=1)) == 2) for v in self.envvars]):
raise ValueError(
- "Improperly specified environment variables: --env variables must be specified in pairs as `=`")
- self.envvars = [v.split('=', maxsplit=1) for v in (self.envvars)]
+ "Improperly specified environment variables: --env variables must be specified in pairs as `=`"
+ )
+ self.envvars = [v.split("=", maxsplit=1) for v in (self.envvars)]
cliargs = cliargs or []
self.cliargs = []
if cliargs:
for arg in cliargs:
- if '=' in arg:
- self.cliargs.append(arg.split('='))
+ if "=" in arg:
+ self.cliargs.append(arg.split("="))
else:
# allow for --flags like '--prometheus'
- self.cliargs.append((arg, ''))
+ self.cliargs.append((arg, ""))
- self.config_filename = f'{self.network}-{self.namespace}.json'
+ self.config_filename = f"{self.network}-{self.namespace}.json"
self.created_new_nodes = False
if pre_config or recovery_mode:
self.config = pre_config
- self.namespace_network = self.config.get('namespace')
+ self.namespace_network = self.config.get("namespace")
if recovery_mode:
- self.namespace_network = f'{self.network}-{self.namespace}'
+ self.namespace_network = f"{self.network}-{self.namespace}"
return
# where we save our state data so we can remember the resources we created for future use
- self.config_path = self.network_config_path / \
- self.namespace / self.config_filename
+ self.config_path = (
+ self.network_config_path / self.namespace / self.config_filename
+ )
self.config_dir = self.config_path.parent
# print (self.config_path)
if self.config_path.exists():
try:
self.config = json.load(open(self.config_path))
- except json.decoder.JSONDecodeError as e:
+ except json.decoder.JSONDecodeError:
self.emitter.echo(
- f"could not decode config file at: {self.config_path}")
+ f"could not decode config file at: {self.config_path}"
+ )
raise
- self.namespace_network = self.config['namespace']
- elif kwargs.get('read_only'):
+ self.namespace_network = self.config["namespace"]
+ elif kwargs.get("read_only"):
self.config = {
- 'instances': {},
+ "instances": {},
}
return
else:
- self.namespace_network = f'{self.network}-{self.namespace}-{maya.now().date.isoformat()}'
+ self.namespace_network = (
+ f"{self.network}-{self.namespace}-{maya.now().date.isoformat()}"
+ )
self.config = {
"namespace": self.namespace_network,
- "keystorepassword": b64encode(os.urandom(64)).decode('utf-8'),
- "ethpassword": b64encode(os.urandom(64)).decode('utf-8'),
- 'instances': {},
- 'eth_endpoint': eth_endpoint,
- 'docker_image': docker_image
+ "keystorepassword": b64encode(os.urandom(64)).decode("utf-8"),
+ "ethpassword": b64encode(os.urandom(64)).decode("utf-8"),
+ "instances": {},
+ "eth_endpoint": eth_endpoint,
+ "docker_image": docker_image,
}
self._write_config()
- if not self.config.get('keystoremnemonic'):
+ if not self.config.get("keystoremnemonic"):
wallet = keygen.generate()
- self.config['keystoremnemonic'] = wallet.mnemonic()
+ self.config["keystoremnemonic"] = wallet.mnemonic()
self.alert_new_mnemonic(wallet)
self._write_config()
# configure provider specific attributes
@@ -179,24 +199,31 @@ def __init__(self, # TODO: Add type annotations
# if certain config options have been specified with this invocation,
# save these to update host specific variables before deployment
# to allow for individual host config differentiation
- self.host_level_overrides = {k: v for k, v in {
- 'eth_endpoint': eth_endpoint,
- 'polygon_endpoint': self.kwargs.get('polygon_endpoint'),
- 'docker_image': docker_image,
- }.items() if k in self.required_fields}
+ self.host_level_overrides = {
+ k: v
+ for k, v in {
+ "eth_endpoint": eth_endpoint,
+ "polygon_endpoint": self.kwargs.get("polygon_endpoint"),
+ "docker_image": docker_image,
+ }.items()
+ if k in self.required_fields
+ }
- self.config['seed_network'] = seed_network if seed_network is not None else self.config.get(
- 'seed_network')
- if not self.config['seed_network']:
- self.config.pop('seed_node', None)
+ self.config["seed_network"] = (
+ seed_network
+ if seed_network is not None
+ else self.config.get("seed_network")
+ )
+ if not self.config["seed_network"]:
+ self.config.pop("seed_node", None)
# add instance key as host_nickname for use in inventory
- if self.config.get('instances'):
- for k, v in self.config['instances'].items():
- self.config['instances'][k]['host_nickname'] = k
+ if self.config.get("instances"):
+ for k, v in self.config["instances"].items():
+ self.config["instances"][k]["host_nickname"] = k
# migration of old instance config values here
- self._migrate_config_properties(self.config['instances'][k])
+ self._migrate_config_properties(self.config["instances"][k])
self._write_config()
@@ -239,18 +266,18 @@ def _migrate_config_properties(self, node_config: Dict):
@property
def user(self) -> str:
- return 'nucypher'
+ return "nucypher"
def _write_config(self):
config_dir = self.config_path.parent
config_dir.mkdir(parents=True, exist_ok=True)
- with open(self.config_path, 'w') as outfile:
+ with open(self.config_path, "w") as outfile:
json.dump(self.config, outfile, indent=4)
@property
def instance_count(self):
- return len(self.config['instances'].keys())
+ return len(self.config["instances"].keys())
@property
def network_config_path(self):
@@ -262,11 +289,11 @@ def _provider_deploy_attrs(self):
@property
def backup_directory(self):
- return f'{self.config_dir}/remote_operator_backups/'
+ return f"{self.config_dir}/remote_operator_backups/"
@property
def has_wallet(self):
- return self.config.get('local_wallet_keystore') is not None
+ return self.config.get("local_wallet_keystore") is not None
def _configure_provider_params(self):
pass
@@ -275,12 +302,12 @@ def _do_setup_for_instance_creation(self):
pass
def _format_runtime_options(self, node_options):
- node_options.update({'domain': self.network})
- return ' '.join([f'--{name} {value}' for name, value in node_options.items()])
+ node_options.update({"domain": self.network})
+ return " ".join([f"--{name} {value}" for name, value in node_options.items()])
@property
def chain_id(self):
- return NETWORKS[self.network]['policy']
+ return NETWORKS[self.network]["policy"]
@property
def chain_name(self):
@@ -288,51 +315,61 @@ def chain_name(self):
return CHAIN_NAMES[self.chain_id].lower()
except KeyError:
self.emitter.echo(
- f"could not identify public blockchain for {self.network}", color="red")
+ f"could not identify public blockchain for {self.network}", color="red"
+ )
@property
def inventory_path(self):
- return str(Path(DEFAULT_CONFIG_ROOT).joinpath(NODE_CONFIG_STORAGE_KEY, f'{self.namespace_network}.ansible_inventory.yml'))
+ return str(
+ Path(DEFAULT_CONFIG_ROOT).joinpath(
+ NODE_CONFIG_STORAGE_KEY,
+ f"{self.namespace_network}.ansible_inventory.yml",
+ )
+ )
def default_config(self):
defaults = {
- 'envvars':
- [
- (NUCYPHER_ENVVAR_KEYSTORE_PASSWORD,
- self.config['keystorepassword']),
- (NUCYPHER_ENVVAR_OPERATOR_ETHEREUM_PASSWORD,
- self.config['ethpassword']),
-
- ("OPERATOR_ETHEREUM_PASSWORD", # TODO: Remove this (it's for backwards compatibility)
- self.config['ethpassword']),
-
- ],
- 'cliargs': []
+ "envvars": [
+ (NUCYPHER_ENVVAR_KEYSTORE_PASSWORD, self.config["keystorepassword"]),
+ (
+ NUCYPHER_ENVVAR_OPERATOR_ETHEREUM_PASSWORD,
+ self.config["ethpassword"],
+ ),
+ (
+ "OPERATOR_ETHEREUM_PASSWORD", # TODO: Remove this (it's for backwards compatibility)
+ self.config["ethpassword"],
+ ),
+ ],
+ "cliargs": [],
}
return defaults
- def update_generate_inventory(self, node_names, generate_keymaterial=False, **kwargs):
-
+ def update_generate_inventory(
+ self, node_names, generate_keymaterial=False, **kwargs
+ ):
# filter out the nodes we will not be dealing with
- nodes = {key: value for key,
- value in self.config['instances'].items() if key in node_names}
+ nodes = {
+ key: value
+ for key, value in self.config["instances"].items()
+ if key in node_names
+ }
if not nodes:
raise KeyError(
- f"No hosts matched the supplied host names: {node_names}; ensure `host-nickname` is used. Try `nucypher-ops nodes list --all` to view hosts or create new hosts with `nucypher-ops nodes create`")
+ f"No hosts matched the supplied host names: {node_names}; ensure `host-nickname` is used. "
+ f"Try `nucypher-ops nodes list --all` to view hosts or create new hosts with `nucypher-ops nodes create`"
+ )
# migrate values if necessary
for key, node in nodes.items():
self._migrate_config_properties(nodes[key])
defaults = self.default_config()
- if generate_keymaterial or kwargs.get('migrate_nucypher') or kwargs.get('init'):
- wallet = keygen.restore(self.config['keystoremnemonic'])
- keypairs = list(keygen.derive(
- wallet, quantity=self.instance_count))
+ if generate_keymaterial or kwargs.get("migrate_nucypher") or kwargs.get("init"):
+ wallet = keygen.restore(self.config["keystoremnemonic"])
+ keypairs = list(keygen.derive(wallet, quantity=self.instance_count))
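+            # keys are derived from the stored mnemonic, one keypair per node index, so they can be re-derived on recovery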
- for datatype in ['envvars', 'cliargs']:
-
- data_key = f'runtime_{datatype}'
+ for datatype in ["envvars", "cliargs"]:
+ data_key = f"runtime_{datatype}"
input_data = [(k, v) for k, v in getattr(self, datatype)]
@@ -347,17 +384,21 @@ def update_generate_inventory(self, node_names, generate_keymaterial=False, **kw
# we want to update the config with the specified values
# so they will persist in future invocations
- self.config['instances'][key] = copy.deepcopy(nodes[key])
+ self.config["instances"][key] = copy.deepcopy(nodes[key])
# we don't want to save the default_envvars to the config file
# but we do want them to be specified to the inventory template
# but overridden on a per-node basis if previously specified
for key, node in nodes.items():
for k, v in defaults[datatype]:
- if not k in nodes[key][data_key]:
+ if k not in nodes[key][data_key]:
nodes[key][data_key][k] = v
- if generate_keymaterial or kwargs.get('migrate_nucypher') or kwargs.get('init'):
- node['keymaterial'] = keypairs[node['index']][1]
+ if (
+ generate_keymaterial
+ or kwargs.get("migrate_nucypher")
+ or kwargs.get("init")
+ ):
+ node["keymaterial"] = keypairs[node["index"]][1]
inventory_content = self._inventory_template.render(
deployer=self,
@@ -365,7 +406,7 @@ def update_generate_inventory(self, node_names, generate_keymaterial=False, **kw
extra=kwargs,
)
- with open(self.inventory_path, 'w') as outfile:
+ with open(self.inventory_path, "w") as outfile:
outfile.write(inventory_content)
self.emitter.echo(f"wrote new inventory to: {self.inventory_path}")
@@ -378,28 +419,28 @@ def create_nodes(self, node_names):
self.give_helpful_hints(node_names)
count = len(node_names)
self.emitter.echo(
- f"ensuring cloud nodes exist for the following {count} node names:")
+ f"ensuring cloud nodes exist for the following {count} node names:"
+ )
for s in node_names:
- self.emitter.echo(f'\t{s}')
+ self.emitter.echo(f"\t{s}")
time.sleep(3)
self._do_setup_for_instance_creation()
- if not self.config.get('instances'):
- self.config['instances'] = {}
+ if not self.config.get("instances"):
+ self.config["instances"] = {}
for node_name in node_names:
- existing_node = self.config['instances'].get(node_name)
+ existing_node = self.config["instances"].get(node_name)
if not existing_node:
- self.emitter.echo(
- f'creating new node for {node_name}', color='yellow')
+ self.emitter.echo(f"creating new node for {node_name}", color="yellow")
time.sleep(3)
node_data = self.create_new_node(node_name)
- node_data['host_nickname'] = node_name
- node_data['provider'] = self.provider_name
- node_data['index'] = len(self.config['instances'].keys())
- self.config['instances'][node_name] = node_data
- if self.config['seed_network'] and not self.config.get('seed_node'):
- self.config['seed_node'] = node_data['publicaddress']
+ node_data["host_nickname"] = node_name
+ node_data["provider"] = self.provider_name
+ node_data["index"] = len(self.config["instances"].keys())
+ self.config["instances"][node_name] = node_data
+ if self.config["seed_network"] and not self.config.get("seed_node"):
+ self.config["seed_node"] = node_data["publicaddress"]
self._write_config()
self.created_new_nodes = True
@@ -407,7 +448,7 @@ def create_nodes(self, node_names):
@property
def _inventory_template(self):
- template_path = Path(TEMPLATES).joinpath('ursula_inventory.mako')
+ template_path = Path(TEMPLATES).joinpath("ursula_inventory.mako")
return Template(filename=str(template_path))
def configure_host_level_overrides(self, node_names):
@@ -419,70 +460,90 @@ def configure_host_level_overrides(self, node_names):
# if an instance already has a specified value, we only override
# it if that value was input for this command invocation
if input_specified_value:
- self.config['instances'][node_name][k] = input_specified_value
+ self.config["instances"][node_name][k] = input_specified_value
# if this node has not already been configured with this info
- elif not self.config['instances'][node_name].get(k):
+ elif not self.config["instances"][node_name].get(k):
# do we have it in our global config?
if self.config.get(k):
- self.config['instances'][node_name][k] = self.config.get(
- k)
+ self.config["instances"][node_name][k] = self.config.get(k)
# have we already prompted the user for this info?
elif input_values.get(k):
- self.config['instances'][node_name][k] = input_values[k]
+ self.config["instances"][node_name][k] = input_values[k]
# if not, prompt the user.
else:
ux = self.host_level_override_prompts[k]
- if ux.get('choices'):
+ if ux.get("choices"):
input_values[k] = self.emitter.prompt(
- ux['prompt'], type=self.emitter.Choice(ux['choices']), show_choices=False)
+ ux["prompt"],
+ type=self.emitter.Choice(ux["choices"]),
+ show_choices=False,
+ )
else:
- input_values[k] = self.emitter.prompt(ux['prompt'])
- self.config['instances'][node_name][k] = input_values[k]
+ input_values[k] = self.emitter.prompt(ux["prompt"])
+ self.config["instances"][node_name][k] = input_values[k]
self._write_config()
- def deploy_nucypher_on_existing_nodes(self, node_names, migrate_nucypher=False, init=False, **kwargs):
-
+ def deploy_nucypher_on_existing_nodes(
+ self, node_names, migrate_nucypher=False, init=False, **kwargs
+ ):
if migrate_nucypher or init:
- keep_going = self.emitter.prompt(
- "Proceeding with this operation will delete information from your nodes including wallets and keys. Are you sure? (type 'yes')") == 'yes'
+ keep_going = (
+ self.emitter.prompt(
+ "Proceeding with this operation will delete information from your nodes including wallets and keys. Are you sure? (type 'yes')"
+ )
+ == "yes"
+ )
if init:
- keep_going = self.emitter.prompt(
- "Proceeding with this operation will delete your node's eth wallet so make sure it does not posses any significant funds... Are you sure? (type 'yes')") == 'yes'
+ keep_going = (
+ self.emitter.prompt(
+ "Proceeding with this operation will delete your node's eth wallet so make sure it does not posses any significant funds... Are you sure? (type 'yes')"
+ )
+ == "yes"
+ )
if not keep_going:
return
if migrate_nucypher:
self.migrate(**kwargs)
- playbook = Path(PLAYBOOKS).joinpath('setup_remote_workers.yml')
+ playbook = Path(PLAYBOOKS).joinpath("setup_remote_workers.yml")
self.configure_host_level_overrides(node_names)
if self.created_new_nodes:
self.emitter.echo(
- "--- Giving newly created nodes some time to get ready ----")
- with self.emitter.progressbar(range(0, 30), show_eta=False, show_percent=False) as bar:
+ "--- Giving newly created nodes some time to get ready ----"
+ )
+ with self.emitter.progressbar(
+ range(0, 30), show_eta=False, show_percent=False
+ ) as bar:
for tick in bar:
time.sleep(1)
self.emitter.echo(
- 'Running ansible deployment for all running nodes.', color='green')
+ "Running ansible deployment for all running nodes.", color="green"
+ )
- if self.config.get('seed_network') is True and not self.config.get('seed_node'):
- self.config['seed_node'] = list(self.config['instances'].values())[
- 0]['publicaddress']
+ if self.config.get("seed_network") is True and not self.config.get("seed_node"):
+ self.config["seed_node"] = list(self.config["instances"].values())[0][
+ "publicaddress"
+ ]
self._write_config()
self.update_generate_inventory(
- node_names, generate_keymaterial=True, migrate_nucypher=migrate_nucypher, init=init)
+ node_names,
+ generate_keymaterial=True,
+ migrate_nucypher=migrate_nucypher,
+ init=init,
+ )
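+        # drive the playbook through ansible's Python API; the callback collects task results back through the emitter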
loader = DataLoader()
- inventory = InventoryManager(
- loader=loader, sources=self.inventory_path)
+ inventory = InventoryManager(loader=loader, sources=self.inventory_path)
callback = AnsiblePlayBookResultsCollector(
- sock=self.emitter, return_results=self.output_capture)
+ sock=self.emitter, return_results=self.output_capture
+ )
variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
@@ -496,32 +557,32 @@ def deploy_nucypher_on_existing_nodes(self, node_names, migrate_nucypher=False,
executor.run()
for k in node_names:
- installed = self.config['instances'][k].get('installed', [])
+ installed = self.config["instances"][k].get("installed", [])
installed = list(set(installed + [self.application]))
- self.config['instances'][k]['installed'] = installed
+ self.config["instances"][k]["installed"] = installed
self._write_config()
self.update_captured_instance_data(self.output_capture)
self.give_helpful_hints(node_names, backup=True, playbook=playbook)
def update_nucypher_on_existing_nodes(self, node_names):
-
- playbook = Path(PLAYBOOKS).joinpath('update_remote_workers.yml')
+ playbook = Path(PLAYBOOKS).joinpath("update_remote_workers.yml")
self.configure_host_level_overrides(node_names)
- if self.config.get('seed_network') is True and not self.config.get('seed_node'):
- self.config['seed_node'] = list(self.config['instances'].values())[
- 0]['publicaddress']
+ if self.config.get("seed_network") is True and not self.config.get("seed_node"):
+ self.config["seed_node"] = list(self.config["instances"].values())[0][
+ "publicaddress"
+ ]
self._write_config()
self.update_generate_inventory(node_names)
loader = DataLoader()
- inventory = InventoryManager(
- loader=loader, sources=self.inventory_path)
+ inventory = InventoryManager(loader=loader, sources=self.inventory_path)
callback = AnsiblePlayBookResultsCollector(
- sock=self.emitter, return_results=self.output_capture)
+ sock=self.emitter, return_results=self.output_capture
+ )
variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
@@ -538,18 +599,18 @@ def update_nucypher_on_existing_nodes(self, node_names):
self.give_helpful_hints(node_names, backup=True, playbook=playbook)
def get_worker_status(self, node_names, fast=False):
-
- playbook = Path(PLAYBOOKS).joinpath('get_workers_status.yml')
+ playbook = Path(PLAYBOOKS).joinpath("get_workers_status.yml")
if not fast:
self.update_generate_inventory(node_names)
loader = DataLoader()
- inventory = InventoryManager(
- loader=loader, sources=self.inventory_path)
- callback = AnsiblePlayBookResultsCollector(sock=self.emitter, return_results=self.output_capture, filter_output=[
- "Print Ursula Status Data", "Print Last Log Line"])
- variable_manager = VariableManager(
- loader=loader, inventory=inventory)
+ inventory = InventoryManager(loader=loader, sources=self.inventory_path)
+ callback = AnsiblePlayBookResultsCollector(
+ sock=self.emitter,
+ return_results=self.output_capture,
+ filter_output=["Print Ursula Status Data", "Print Last Log Line"],
+ )
+ variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
playbooks=[playbook],
@@ -565,16 +626,15 @@ def get_worker_status(self, node_names, fast=False):
self.give_helpful_hints(node_names, playbook=playbook)
def print_worker_logs(self, node_names):
-
- playbook = Path(PLAYBOOKS).joinpath('get_worker_logs.yml')
+ playbook = Path(PLAYBOOKS).joinpath("get_worker_logs.yml")
self.update_generate_inventory(node_names)
loader = DataLoader()
- inventory = InventoryManager(
- loader=loader, sources=self.inventory_path)
+ inventory = InventoryManager(loader=loader, sources=self.inventory_path)
callback = AnsiblePlayBookResultsCollector(
- sock=self.emitter, return_results=self.output_capture)
+ sock=self.emitter, return_results=self.output_capture
+ )
variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
@@ -591,15 +651,14 @@ def print_worker_logs(self, node_names):
self.give_helpful_hints(node_names, playbook=playbook)
def backup_remote_data(self, node_names):
-
- playbook = Path(PLAYBOOKS).joinpath('backup_remote_workers.yml')
+ playbook = Path(PLAYBOOKS).joinpath("backup_remote_workers.yml")
self.update_generate_inventory(node_names)
loader = DataLoader()
- inventory = InventoryManager(
- loader=loader, sources=self.inventory_path)
+ inventory = InventoryManager(loader=loader, sources=self.inventory_path)
callback = AnsiblePlayBookResultsCollector(
- sock=self.emitter, return_results=self.output_capture)
+ sock=self.emitter, return_results=self.output_capture
+ )
variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
@@ -615,15 +674,14 @@ def backup_remote_data(self, node_names):
self.give_helpful_hints(node_names, backup=True, playbook=playbook)
def stop_worker_process(self, node_names):
-
- playbook = Path(PLAYBOOKS).joinpath('stop_remote_workers.yml')
+ playbook = Path(PLAYBOOKS).joinpath("stop_remote_workers.yml")
self.update_generate_inventory(node_names)
loader = DataLoader()
- inventory = InventoryManager(
- loader=loader, sources=self.inventory_path)
+ inventory = InventoryManager(loader=loader, sources=self.inventory_path)
callback = AnsiblePlayBookResultsCollector(
- sock=self.emitter, return_results=self.output_capture)
+ sock=self.emitter, return_results=self.output_capture
+ )
variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
@@ -639,16 +697,15 @@ def stop_worker_process(self, node_names):
self.give_helpful_hints(node_names, playbook=playbook)
def restore_from_backup(self, target_host, source_path):
-
- playbook = Path(PLAYBOOKS).joinpath('restore_ursula_from_backup.yml')
+ playbook = Path(PLAYBOOKS).joinpath("restore_ursula_from_backup.yml")
self.update_generate_inventory([target_host], restore_path=source_path)
loader = DataLoader()
- inventory = InventoryManager(
- loader=loader, sources=self.inventory_path)
+ inventory = InventoryManager(loader=loader, sources=self.inventory_path)
callback = AnsiblePlayBookResultsCollector(
- sock=self.emitter, return_results=self.output_capture)
+ sock=self.emitter, return_results=self.output_capture
+ )
variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
@@ -664,8 +721,9 @@ def restore_from_backup(self, target_host, source_path):
def get_provider_hosts(self):
return [
- (node_name, host_data) for node_name, host_data in self.get_all_hosts()
- if host_data['provider'] == self.provider_name
+ (node_name, host_data)
+ for node_name, host_data in self.get_all_hosts()
+ if host_data["provider"] == self.provider_name
]
def get_namespace_names(self, namespace=None):
@@ -679,40 +737,54 @@ def get_namespace_names(self, namespace=None):
def get_namespace_data(self, namespace=None):
for ns in self.get_namespace_names(namespace):
- dep = CloudDeployers.get_deployer('generic')(
- self.emitter,
- namespace=ns,
- network=self.network,
- read_only=True
+ dep = CloudDeployers.get_deployer("generic")(
+ self.emitter, namespace=ns, network=self.network, read_only=True
)
yield (ns, dep.get_all_hosts())
def get_host_by_name(self, host_name):
try:
- return next(host_data for node_name, host_data in self.get_all_hosts() if node_name == host_name)
+ return next(
+ host_data
+ for node_name, host_data in self.get_all_hosts()
+ if node_name == host_name
+ )
except StopIteration:
return None
def get_all_hosts(self):
- return [(node_name, host_data) for node_name, host_data in self.config.get('instances', {}).items()]
+ return [
+ (node_name, host_data)
+ for node_name, host_data in self.config.get("instances", {}).items()
+ ]
def add_already_configured_node(self, host_data):
- if self.get_host_by_name(host_data['host_nickname']):
+ if self.get_host_by_name(host_data["host_nickname"]):
raise AttributeError(
- f"Host with nickname {host_data['host_nickname']} already exists in {self.network}/{self.namespace}")
- host_data['index'] = self.instance_count
- self.config['instances'][host_data['host_nickname']] = host_data
+ f"Host with nickname {host_data['host_nickname']} already exists in {self.network}/{self.namespace}"
+ )
+ host_data["index"] = self.instance_count
+ self.config["instances"][host_data["host_nickname"]] = host_data
self._write_config()
def destroy_resources(self, node_names):
- node_names = [s for s in node_names if s in [
- names for names, data in self.get_provider_hosts()]]
- if self.emitter.prompt(f"Destroying {self.provider_name} instances for nodes: {' '.join(node_names)}. Continue? (type 'yes')") == 'yes':
-
+ node_names = [
+ s
+ for s in node_names
+ if s in [names for names, data in self.get_provider_hosts()]
+ ]
+ if (
+ self.emitter.prompt(
+ f"Destroying {self.provider_name} instances for nodes: {' '.join(node_names)}. Continue? (type 'yes')"
+ )
+ == "yes"
+ ):
if self._destroy_resources(node_names):
- if not self.config.get('instances'):
+ if not self.config.get("instances"):
self.emitter.echo(
- f"deleted all requested resources for {self.provider_name}. We are clean. No money is being spent.", color="green")
+ f"deleted all requested resources for {self.provider_name}. We are clean. No money is being spent.",
+ color="green",
+ )
def _destroy_resources(self, *args, **kwargs):
raise NotImplementedError
@@ -722,7 +794,9 @@ def recover_instance_config(self, instance_data, config_filepath=None):
raise ValueError("Don't call function unless in recovery mode")
if not config_filepath:
- config_filepath = self.network_config_path / self.namespace / self.config_filename
+ config_filepath = (
+ self.network_config_path / self.namespace / self.config_filename
+ )
self.config_path = config_filepath
self.config_dir = self.config_path.parent
instances_by_public_address = {}
@@ -752,33 +826,37 @@ def recover_instance_config(self, instance_data, config_filepath=None):
def update_captured_instance_data(self, results):
instances_by_public_address = {
- d['publicaddress']: d for d in self.config['instances'].values()}
+ d["publicaddress"]: d for d in self.config["instances"].values()
+ }
for k, data in results.items():
# results are keyed by 'publicaddress' in config data
for instance_address, value in data:
instances_by_public_address[instance_address][k] = value
- for k, v in self.config['instances'].items():
- if instances_by_public_address.get(v['publicaddress']):
- self.config['instances'][k] = instances_by_public_address.get(
- v['publicaddress'])
+ for k, v in self.config["instances"].items():
+ if instances_by_public_address.get(v["publicaddress"]):
+ self.config["instances"][k] = instances_by_public_address.get(
+ v["publicaddress"]
+ )
self._write_config()
def give_helpful_hints(self, node_names, backup=False, playbook=None):
-
self.emitter.echo("some relevant info:")
self.emitter.echo(f' config file: "{self.config_path}"')
- self.emitter.echo(
- f" inventory file: {self.inventory_path}", color='yellow')
- if self.config.get('keypair_path'):
+ self.emitter.echo(f" inventory file: {self.inventory_path}", color="yellow")
+ if self.config.get("keypair_path"):
self.emitter.echo(
- f" keypair file: {self.config['keypair_path']}", color='yellow')
+ f" keypair file: {self.config['keypair_path']}", color="yellow"
+ )
if backup:
self.emitter.echo(
- " *** Local backups containing sensitive data may have been created. ***", color="red")
+ " *** Local backups containing sensitive data may have been created. ***",
+ color="red",
+ )
self.emitter.echo(
- f" Backup data can be found here: {self.backup_directory}")
+ f" Backup data can be found here: {self.backup_directory}"
+ )
if nodes := [h for h in self.get_all_hosts() if h[0] in node_names]:
self.emitter.echo("Host Info")
@@ -787,263 +865,288 @@ def give_helpful_hints(self, node_names, backup=False, playbook=None):
def print_node_data(self, node_name, host_data):
warnings.filterwarnings("ignore")
- dep = CloudDeployers.get_deployer(host_data['provider'])(
+ dep = CloudDeployers.get_deployer(host_data["provider"])(
self.emitter,
pre_config=self.config,
namespace=self.namespace,
- network=self.network
+ network=self.network,
)
- self.emitter.echo(
- f"\t{node_name}: {host_data['publicaddress']}")
- self.emitter.echo(
- f"\t\t {dep.format_ssh_cmd(host_data)}", color="yellow")
- if host_data.get('operator address'):
- self.emitter.echo(
- f"\t\t operator address: {host_data['operator address']}")
- if self.config.get('local_blockchain_provider'):
+ self.emitter.echo(f"\t{node_name}: {host_data['publicaddress']}")
+ self.emitter.echo(f"\t\t {dep.format_ssh_cmd(host_data)}", color="yellow")
+ if host_data.get("operator address"):
+ self.emitter.echo(f"\t\t operator address: {host_data['operator address']}")
+ if self.config.get("local_blockchain_provider"):
wallet_balance = self.get_wallet_balance(
- host_data['operator address'], eth=True)
- self.emitter.echo(
- f"\t\t operator ETH balance: {wallet_balance}"
+ host_data["operator address"], eth=True
)
+ self.emitter.echo(f"\t\t operator ETH balance: {wallet_balance}")
staking_provider = None
try:
staking_provider = self.get_staking_provider(
- host_data['operator address'])
+ host_data["operator address"]
+ )
self.emitter.echo(
- f"\t\t staking provider address: {staking_provider}")
+ f"\t\t staking provider address: {staking_provider}"
+ )
if staking_provider:
# if we have a staking provider, lets check if the node is confirmed
is_confirmed = self.check_is_confirmed(
- host_data['operator address'])
- self.emitter.echo(
- f"\t\t operator confirmed: {is_confirmed}")
+ host_data["operator address"]
+ )
+ self.emitter.echo(f"\t\t operator confirmed: {is_confirmed}")
stake_amount = self.get_stake_amount(staking_provider)
- self.emitter.echo(
- f"\t\t staked amount: {stake_amount:,}")
+ self.emitter.echo(f"\t\t staked amount: {stake_amount:,}")
if is_confirmed:
# if the node is confirmed, we should be able to query it
try:
node_response = self.query_active_node(
- host_data['publicaddress'])
+ host_data["publicaddress"]
+ )
self.emitter.echo("\t\t active node status:")
self.emitter.echo(
- f"\t\t\tnickname: {node_response['nickname']['text']}")
+ f"\t\t\tnickname: {node_response['nickname']['text']}"
+ )
self.emitter.echo(
- f"\t\t\trest url: {node_response['rest_url']}")
+ f"\t\t\trest url: {node_response['rest_url']}"
+ )
self.emitter.echo(
- f"\t\t\tknown nodes: {len(node_response['known_nodes'])}")
+ f"\t\t\tknown nodes: {len(node_response['known_nodes'])}"
+ )
self.emitter.echo(
- f"\t\t\tfleet state: {len(node_response['fleet_state'])}")
+ f"\t\t\tfleet state: {len(node_response['fleet_state'])}"
+ )
except Exception as e:
print(e)
except Exception as e:
raise e
if not staking_provider:
self.emitter.echo(
- f"\t\t staking provider: NOT BOUND TO STAKING PROVIDER")
+ "\t\t staking provider: NOT BOUND TO STAKING PROVIDER"
+ )
def format_ssh_cmd(self, host_data):
+ keypair = ""
+ user = next(
+ v["value"]
+ for v in host_data["provider_deploy_attrs"]
+ if v["key"] == "default_user"
+ )
- keypair = ''
- user = next(v['value'] for v in host_data['provider_deploy_attrs']
- if v['key'] == 'default_user')
-
- if any([pda['key'] == 'ansible_ssh_private_key_file' for pda in host_data['provider_deploy_attrs']]):
- keypair = '-i "' + next(v['value'] for v in host_data['provider_deploy_attrs']
- if v['key'] == 'ansible_ssh_private_key_file') + '"'
+ if any(
+ [
+ pda["key"] == "ansible_ssh_private_key_file"
+ for pda in host_data["provider_deploy_attrs"]
+ ]
+ ):
+ keypair = (
+ '-i "'
+ + next(
+ v["value"]
+ for v in host_data["provider_deploy_attrs"]
+ if v["key"] == "ansible_ssh_private_key_file"
+ )
+ + '"'
+ )
return f"ssh {user}@{host_data['publicaddress']} {keypair}"
def alert_new_mnemonic(self, wallet):
+ self.emitter.echo("A new keystore mnemonic has been generated:", color="yellow")
+ self.emitter.echo(f"\t{wallet.mnemonic()}", color="red")
+ self.emitter.echo("This will be stored in the config file for this namespace.")
+ self.emitter.echo(f"\t{self.config_path}")
self.emitter.echo(
- "A new keystore mnemonic has been generated:", color="yellow")
- self.emitter.echo(f'\t{wallet.mnemonic()}', color="red")
- self.emitter.echo(
- "This will be stored in the config file for this namespace.")
- self.emitter.echo(f'\t{self.config_path}')
- self.emitter.echo(
- "Back this up and keep it safe to enable restoration of Ursula nodes in the event of hard disk failure or other data loss.", color="red")
+ "Back this up and keep it safe to enable restoration of Ursula nodes in the event of hard disk failure or other data loss.",
+ color="red",
+ )
def new_mnemonic(self):
wallet = keygen.generate()
- self.config['keystoremnemonic'] = wallet.mnemonic()
+ self.config["keystoremnemonic"] = wallet.mnemonic()
self.alert_new_mnemonic(wallet)
def migrate_5_6(self):
- for index, instance in enumerate(self.config['instances'].keys()):
- if not self.config['instances'][instance].get('index'):
- self.config['instances'][instance]['index'] = index
- if instance.runtime_envvars.get('NUCYPHER_WORKER_ETH_PASSWORD'):
- instance.runtime_envvars['NUCYPHER_OPERATOR_ETH_PASSWORD'] = instance.runtime_envvars.get(
- 'NUCYPHER_WORKER_ETH_PASSWORD')
- del instance.runtime_envvars['NUCYPHER_WORKER_ETH_PASSWORD']
-
- if self.config.get('keyringpassword'):
- self.config['keystorepassword'] = self.config.get(
- 'keyringpassword')
- if not self.config.get('keystoremnemonic'):
+        for index, instance in enumerate(self.config["instances"].keys()):
+            node = self.config["instances"][instance]
+            if not node.get("index"):
+                node["index"] = index
+            # `instance` is a key string; the runtime env vars live on the node's config dict
+            runtime_envvars = node.get("runtime_envvars", {})
+            if runtime_envvars.get("NUCYPHER_WORKER_ETH_PASSWORD"):
+                runtime_envvars["NUCYPHER_OPERATOR_ETH_PASSWORD"] = runtime_envvars.get(
+                    "NUCYPHER_WORKER_ETH_PASSWORD"
+                )
+                del runtime_envvars["NUCYPHER_WORKER_ETH_PASSWORD"]
+
+ if self.config.get("keyringpassword"):
+ self.config["keystorepassword"] = self.config.get("keyringpassword")
+ if not self.config.get("keystoremnemonic"):
self.new_mnemonic()
self._write_config()
def migrate(self, current=5, target=6):
- migration = f'migrate_{current}_{target}'
+ migration = f"migrate_{current}_{target}"
if hasattr(self, migration):
return getattr(self, migration)()
self.emitter.echo(
- f" *** Error: Couldn't find migration from {current} to {target} ***", color="red")
+ f" *** Error: Couldn't find migration from {current} to {target} ***",
+ color="red",
+ )
def remove_resources(self, hostnames):
for host in hostnames:
- existing_instances = {k: v for k, v in self.config.get(
- 'instances', {}).items() if k in hostnames}
+ existing_instances = {
+ k: v
+ for k, v in self.config.get("instances", {}).items()
+ if k in hostnames
+ }
if existing_instances:
for node_name, instance in existing_instances.items():
self.emitter.echo(
- f"removing instance data for {node_name} in 3 seconds...", color='red')
+ f"removing instance data for {node_name} in 3 seconds...",
+ color="red",
+ )
time.sleep(3)
- del self.config['instances'][node_name]
+ del self.config["instances"][node_name]
self._write_config()
def get_local_blockchain_provider(self):
- if not self.config.get('local_blockchain_provider'):
+ if not self.config.get("local_blockchain_provider"):
blockchain_provider = self.emitter.prompt(
- "Please enter a blockchain provider for this local wallet to access the blockchain.")
- self.config['local_blockchain_provider'] = blockchain_provider
+ "Please enter a blockchain provider for this local wallet to access the blockchain."
+ )
+ self.config["local_blockchain_provider"] = blockchain_provider
self._write_config()
- return self.config.get('local_blockchain_provider')
+ return self.config.get("local_blockchain_provider")
def get_or_create_local_wallet(self, password):
-
try:
import web3
except ImportError:
self.emitter.echo(
- "web3 must be installed to use this functionality ('pip install web3')")
+ "web3 must be installed to use this functionality ('pip install web3')"
+ )
if not self.has_wallet:
# uses index 0 by default which will not be used by any subsequent node
account = web3.Account.create()
keystore = web3.Account.encrypt(account.privateKey, password)
- keystore_b64 = b64encode(json.dumps(
- keystore).encode()).decode('utf-8')
- self.config['local_wallet_keystore'] = keystore_b64
+ keystore_b64 = b64encode(json.dumps(keystore).encode()).decode("utf-8")
+ self.config["local_wallet_keystore"] = keystore_b64
self._write_config()
- account_keystore = b64decode(
- self.config['local_wallet_keystore']).decode()
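+        # the encrypted keystore JSON is persisted base64-encoded in the namespace config and decrypted with the supplied password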
+ account_keystore = b64decode(self.config["local_wallet_keystore"]).decode()
return web3.Account.from_key(web3.Account.decrypt(account_keystore, password))
@needs_provider
def get_wallet_balance(self, web3, address, eth=False):
balance = web3.eth.get_balance(address)
if eth:
- return web3.fromWei(balance, 'ether')
+ return web3.fromWei(balance, "ether")
return balance
@needs_registry
@needs_provider
def get_staking_provider(self, web3, contracts, address):
- contract_address, abi = contracts['TACoApplication']
+ contract_address, abi = contracts["TACoApplication"]
contract = web3.eth.contract(abi=abi, address=contract_address)
return contract.functions.stakingProviderFromOperator(address).call()
@needs_registry
@needs_provider
def check_is_confirmed(self, web3, contracts, address):
- contract_address, abi = contracts['TACoApplication']
+ contract_address, abi = contracts["TACoApplication"]
contract = web3.eth.contract(abi=abi, address=contract_address)
return contract.functions.isOperatorConfirmed(address).call()
@needs_registry
@needs_provider
def get_stake_amount(self, web3, contracts, address):
- contract_address, abi = contracts['TACoApplication']
+ contract_address, abi = contracts["TACoApplication"]
contract = web3.eth.contract(abi=abi, address=contract_address)
balance = contract.functions.authorizedStake(address).call()
- return int(web3.fromWei(balance, 'ether'))
+ return int(web3.fromWei(balance, "ether"))
def query_active_node(self, public_address):
- return requests.get(f"https://{public_address}:9151/status/?json=true", verify=False).json()
+ return requests.get(
+ f"https://{public_address}:9151/status/?json=true", verify=False
+ ).json()
@needs_provider
def fund_nodes(self, web3, wallet, node_names, amount):
hosts = [h for h in self.get_all_hosts() if h[0] in node_names]
for name, host in hosts:
next_tx = False
- if not host.get('operator address'):
+ if not host.get("operator address"):
raise AttributeError(
- f"missing operator address for node: {host['host_nickname']}. Deploy ursula first to create an operator address.")
+ f"missing operator address for node: {host['host_nickname']}. "
+ f"Deploy ursula first to create an operator address."
+ )
- host_op_address = host.get('operator address')
- balance = web3.eth.get_balance(host_op_address)
- existing_balance = self.get_wallet_balance(
- host_op_address, eth=True)
- self.emitter.echo(
- f"existing balance for node: {name}: {existing_balance}")
+ host_op_address = host.get("operator address")
+ _balance = web3.eth.get_balance(host_op_address)
+ existing_balance = self.get_wallet_balance(host_op_address, eth=True)
+ self.emitter.echo(f"existing balance for node: {name}: {existing_balance}")
if existing_balance >= amount:
self.emitter.echo(
- f"host {name} already has {existing_balance} ETH. funded.")
+ f"host {name} already has {existing_balance} ETH. funded."
+ )
else:
tx_hash = self.send_eth(wallet, host_op_address, amount)
while next_tx is False:
-
try:
- tx_state = web3.eth.get_transaction(tx_hash)
+ _tx_state = web3.eth.get_transaction(tx_hash)
if self.get_wallet_balance(host_op_address, eth=True) > 0:
next_tx = True
else:
time.sleep(1)
except TransactionNotFound:
- self.emitter.echo(
- 'waiting for transaction confirmation...')
+ self.emitter.echo("waiting for transaction confirmation...")
time.sleep(1)
self.emitter.echo(
- f"Broadcast transaction {tx_hash} for node: {host['host_nickname']}")
+ f"Broadcast transaction {tx_hash} for node: {host['host_nickname']}"
+ )
@needs_provider
def send_eth(self, web3, wallet, destination_address, amount_eth):
-
transaction = {
- 'chainId': self.chain_id,
- "nonce": web3.eth.getTransactionCount(wallet.address, 'pending'),
+ "chainId": self.chain_id,
+ "nonce": web3.eth.getTransactionCount(wallet.address, "pending"),
"from": wallet.address,
"to": destination_address,
- "value": web3.toWei(amount_eth, 'ether'),
+ "value": web3.toWei(amount_eth, "ether"),
"gas": 21000,
- "gasPrice": web3.eth.gasPrice * 2
+ "gasPrice": web3.eth.gasPrice * 2,
}
signed_tx = wallet.sign_transaction(transaction)
return web3.eth.send_raw_transaction(signed_tx.rawTransaction).hex()
def get_backup_path_by_nickname(self, nickname):
- return os.path.join(self.backup_directory, self.config['instances'][nickname]['publicaddress'])
+ return os.path.join(
+ self.backup_directory, self.config["instances"][nickname]["publicaddress"]
+ )
def get_node_config(self, nickname):
- return self.config['instances'][nickname]
+ return self.config["instances"][nickname]
@needs_provider
def defund_nodes(self, web3, hostnames, to=None, amount=None):
for hostname in hostnames:
- amount_to_send = None
+ _amount_to_send = None
backuppath = self.get_backup_path_by_nickname(hostname)
- nodeconfig = self.get_node_config(hostname)
- for keystorepath in Path(backuppath).rglob('*UTC*'): # should only be one
- ethpw = self.config['ethpassword']
+ _nodeconfig = self.get_node_config(hostname)
+ for keystorepath in Path(backuppath).rglob("*UTC*"): # should only be one
+ ethpw = self.config["ethpassword"]
with open(keystorepath) as keyfile:
encrypted_key = keyfile.read()
- private_key = web3.eth.account.decrypt(
- encrypted_key, ethpw)
+ private_key = web3.eth.account.decrypt(encrypted_key, ethpw)
wallet = web3.eth.account.from_key(private_key)
balance = web3.eth.get_balance(wallet.address)
if not balance:
- self.emitter.echo(f'{hostname} has no ETH')
+ self.emitter.echo(f"{hostname} has no ETH")
continue
if amount:
- amount_to_send = web3.toWei(amount, 'ether')
+ amount_to_send = web3.toWei(amount, "ether")
else:
# we are sending all of it
needed_gas = web3.eth.gasPrice * 21000 * 2
@@ -1051,7 +1154,10 @@ def defund_nodes(self, web3, hostnames, to=None, amount=None):
amount_to_send = amount_minus_gas
if amount_to_send < 0:
- msg = f"amount to send, including transaction gas: {web3.fromWei(max(amount_to_send, needed_gas), 'ether')} is more than total available ETH ({web3.fromWei(balance, 'ether')})"
+ msg = (
+ f"amount to send, including transaction gas: {web3.fromWei(max(amount_to_send, needed_gas), 'ether')} "
+ f"is more than total available ETH ({web3.fromWei(balance, 'ether')})"
+ )
if len(hostnames) > 1:
# keep going but notify
self.emitter.echo(msg)
@@ -1059,309 +1165,345 @@ def defund_nodes(self, web3, hostnames, to=None, amount=None):
else:
raise AttributeError(msg)
self.emitter.echo(
- f"Attempting to send {web3.fromWei(amount_to_send, 'ether')} ETH from {hostname} to {to} in 3 seconds.")
+ f"Attempting to send {web3.fromWei(amount_to_send, 'ether')} ETH from {hostname} to {to} in 3 seconds."
+ )
time.sleep(3)
result = self.send_eth(
- wallet, to, web3.fromWei(amount_to_send, 'ether'))
- self.emitter.echo(f'Broadcast transaction: {result}')
+ wallet, to, web3.fromWei(amount_to_send, "ether")
+ )
+ self.emitter.echo(f"Broadcast transaction: {result}")
class DigitalOceanConfigurator(BaseCloudNodeConfigurator):
-
- default_region = 'SFO3'
- provider_name = 'digitalocean'
+ default_region = "SFO3"
+ provider_name = "digitalocean"
def get_region(self):
-
regions = [
- 'NYC1',
- 'NYC3',
- 'AMS3',
- 'SFO3',
- 'SGP1',
- 'LON1',
- 'FRA1',
- 'TOR1',
- 'BLR1'
+ "NYC1",
+ "NYC3",
+ "AMS3",
+ "SFO3",
+ "SGP1",
+ "LON1",
+ "FRA1",
+ "TOR1",
+ "BLR1",
]
- region = self.kwargs.get('region') or self.config.get('digital-ocean-region')
+ region = self.kwargs.get("region") or self.config.get("digital-ocean-region")
if not region:
- region = os.environ.get('DIGITALOCEAN_REGION')
+ region = os.environ.get("DIGITALOCEAN_REGION")
if region:
# if using env variable ensure that it is what the user wants
- use_region = self.emitter.prompt(
- f"No explicit region value defined; using region value from 'DIGITALOCEAN_REGION' environment variable: {region}. Continue? (type 'yes')") == "yes"
+ use_region = (
+ self.emitter.prompt(
+ f"No explicit region value defined; using region value from 'DIGITALOCEAN_REGION' environment variable: {region}. Continue? (type 'yes')"
+ )
+ == "yes"
+ )
if not use_region:
# reset region so that random one is used instead
region = None
if region:
- self.emitter.echo(f'Using Digital Ocean region: {region}')
- if not region in regions:
+ self.emitter.echo(f"Using Digital Ocean region: {region}")
+ if region not in regions:
raise AttributeError(
- f"{region} is not a valid DigitalOcean region. Find available regions here: https://www.digitalocean.com/docs/platform/availability-matrix/")
+ f"{region} is not a valid DigitalOcean region. Find available regions here: https://www.digitalocean.com/docs/platform/availability-matrix/"
+ )
else:
region = random.choice(regions)
self.emitter.echo(
- f'Randomly choosing DigitalOcean region: {region}, to change regions call this command with --region specified or `export DIGITALOCEAN_REGION: https://www.digitalocean.com/docs/platform/availability-matrix/\n', color='yellow')
+ f"Randomly choosing DigitalOcean region: {region}, to change regions call this command with --region specified or `export DIGITALOCEAN_REGION: https://www.digitalocean.com/docs/platform/availability-matrix/\n",
+ color="yellow",
+ )
return region
@property
def instance_size(self):
- return self.kwargs.get('instance_type') or "s-1vcpu-2gb"
+ return self.kwargs.get("instance_type") or "s-1vcpu-2gb"
@property
def _provider_deploy_attrs(self):
return [
- {'key': 'default_user', 'value': 'root'},
+ {"key": "default_user", "value": "root"},
]
def _configure_provider_params(self):
-
- self.token = self.config.get('digital-ocean-access-token')
+ self.token = self.config.get("digital-ocean-access-token")
if not self.token:
self.emitter.echo(
- "checking envvar DIGITALOCEAN_ACCESS_TOKEN for access token...")
- self.token = os.environ.get('DIGITALOCEAN_ACCESS_TOKEN')
+ "checking envvar DIGITALOCEAN_ACCESS_TOKEN for access token..."
+ )
+ self.token = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN")
if not self.token:
self.token = self.emitter.prompt(
- f"Please enter your Digital Ocean Access Token which can be created here: https://cloud.digitalocean.com/account/api/tokens. It looks like this: b34abcDEF17ABCDEFAbcDEf09fd72a28425ABCDEF8b198e9623ABCDEFc11591. You can also `export DIGITALOCEAN_ACCESS_TOKEN=`")
+ "Please enter your Digital Ocean Access Token which can be created here: https://cloud.digitalocean.com/account/api/tokens. "
+ "It looks like this: b34abcDEF17ABCDEFAbcDEf09fd72a28425ABCDEF8b198e9623ABCDEFc11591. You can also `export DIGITALOCEAN_ACCESS_TOKEN=`"
+ )
if not self.token:
raise AttributeError(
- "Could not continue without token or DIGITALOCEAN_ACCESS_TOKEN environment variable.")
+ "Could not continue without token or DIGITALOCEAN_ACCESS_TOKEN environment variable."
+ )
self.region = self.get_region()
- self.sshkey = self.config.get('sshkey')
+ self.sshkey = self.config.get("sshkey")
if not self.sshkey:
self.emitter.echo(
- "checking envvar DIGITAL_OCEAN_KEY_FINGERPRINT for key fingerprint...")
- self.sshkey = os.environ.get('DIGITAL_OCEAN_KEY_FINGERPRINT')
+ "checking envvar DIGITAL_OCEAN_KEY_FINGERPRINT for key fingerprint..."
+ )
+ self.sshkey = os.environ.get("DIGITAL_OCEAN_KEY_FINGERPRINT")
if not self.sshkey:
self.sshkey = self.emitter.prompt(
- f"Please `enter the fingerprint of a Digital Ocean SSH Key which can be created here: https://cloud.digitalocean.com/account/security. They look like this: 8a:db:8f:4c:b1:61:fa:84:21:30:4d:d6:77:3b:a1:4d")
+ "Please `enter the fingerprint of a Digital Ocean SSH Key which can be created here: https://cloud.digitalocean.com/account/security. "
+ "They look like this: 8a:db:8f:4c:b1:61:fa:84:21:30:4d:d6:77:3b:a1:4d"
+ )
if not self.sshkey:
self.emitter.echo(
- "Please set the name of your Digital Ocean SSH Key (`export DIGITAL_OCEAN_KEY_FINGERPRINT=` from here: https://cloud.digitalocean.com/account/security", color="red")
+ "Please set the name of your Digital Ocean SSH Key (`export DIGITAL_OCEAN_KEY_FINGERPRINT=` from here: https://cloud.digitalocean.com/account/security",
+ color="red",
+ )
self.emitter.echo(
- "it should look like `DIGITAL_OCEAN_KEY_FINGERPRINT=88:fb:53:51:09:aa:af:02:e2:99:95:2d:39:64:c1:64`\n", color="red")
+ "it should look like `DIGITAL_OCEAN_KEY_FINGERPRINT=88:fb:53:51:09:aa:af:02:e2:99:95:2d:39:64:c1:64`\n",
+ color="red",
+ )
raise AttributeError(
- "Could not continue without DIGITAL_OCEAN_KEY_FINGERPRINT.")
+ "Could not continue without DIGITAL_OCEAN_KEY_FINGERPRINT."
+ )
- self.config['sshkey'] = self.sshkey
- self.config['digital-ocean-region'] = self.region
- self.config['digital-ocean-access-token'] = self.token
+ self.config["sshkey"] = self.sshkey
+ self.config["digital-ocean-region"] = self.region
+ self.config["digital-ocean-access-token"] = self.token
self._write_config()
def create_new_node(self, node_name):
-
- response = requests.post("https://api.digitalocean.com/v2/droplets",
- {
- "name": f'{node_name}',
- "region": self.region,
- "size": self.instance_size,
- "image": "ubuntu-20-04-x64",
- "ssh_keys": [self.sshkey]
- },
- headers={
- "Authorization": f'Bearer {self.token}'
- }
- )
+ response = requests.post(
+ "https://api.digitalocean.com/v2/droplets",
+ {
+ "name": f"{node_name}",
+ "region": self.region,
+ "size": self.instance_size,
+ "image": "ubuntu-20-04-x64",
+ "ssh_keys": [self.sshkey],
+ },
+ headers={"Authorization": f"Bearer {self.token}"},
+ )
if response.status_code < 300:
resp = response.json()
- new_node_id = resp['droplet']['id']
- node_data = {'InstanceId': new_node_id}
+ new_node_id = resp["droplet"]["id"]
+ node_data = {"InstanceId": new_node_id}
self.emitter.echo("\twaiting for instance to come online...")
instance_public_ip = None
self.emitter.echo("checking for node's public IP")
- with self.emitter.progressbar(length=30, show_eta=False, show_percent=False) as bar:
+ with self.emitter.progressbar(
+ length=30, show_eta=False, show_percent=False
+ ) as bar:
while not instance_public_ip:
bar.update(1)
time.sleep(1)
- instance_resp = requests.get(
- f'https://api.digitalocean.com/v2/droplets/{new_node_id}/',
- headers={
- "Authorization": f'Bearer {self.token}'
- }
- ).json().get('droplet')
- if instance_resp['status'] == 'active':
- if instance_resp.get('networks', {}).get('v4'):
+ instance_resp = (
+ requests.get(
+ f"https://api.digitalocean.com/v2/droplets/{new_node_id}/",
+ headers={"Authorization": f"Bearer {self.token}"},
+ )
+ .json()
+ .get("droplet")
+ )
+ if instance_resp["status"] == "active":
+ if instance_resp.get("networks", {}).get("v4"):
instance_public_ip = next(
- (n['ip_address'] for n in instance_resp['networks']['v4'] if n['type'] == 'public'), None)
+ (
+ n["ip_address"]
+ for n in instance_resp["networks"]["v4"]
+ if n["type"] == "public"
+ ),
+ None,
+ )
bar.update(30)
- node_data['publicaddress'] = instance_public_ip
- node_data['provider_deploy_attrs'] = self._provider_deploy_attrs
- node_data['installed'] = []
+ node_data["publicaddress"] = instance_public_ip
+ node_data["provider_deploy_attrs"] = self._provider_deploy_attrs
+ node_data["installed"] = []
return node_data
else:
- self.emitter.echo(response.text, color='red')
+ self.emitter.echo(response.text, color="red")
raise BaseException("Error creating resources in DigitalOcean")
def _destroy_resources(self, node_names):
-
existing_instances = {
- k: v for k, v in self.get_provider_hosts() if k in node_names}
+ k: v for k, v in self.get_provider_hosts() if k in node_names
+ }
if existing_instances:
self.emitter.echo(
- f"\nAbout to destroy the following: {', '.join(existing_instances.keys())}, including all local data about these nodes.")
+ f"\nAbout to destroy the following: {', '.join(existing_instances.keys())}, including all local data about these nodes."
+ )
self.emitter.echo("\ntype 'y' to continue")
- if self.emitter.getchar(echo=False) == 'y':
+ if self.emitter.getchar(echo=False) == "y":
for node_name, instance in existing_instances.items():
- if node_names and not node_name in node_names:
+ if node_names and node_name not in node_names:
continue
self.emitter.echo(
- f"deleting worker instance for {node_name} in 3 seconds...", color='red')
+ f"deleting worker instance for {node_name} in 3 seconds...",
+ color="red",
+ )
time.sleep(3)
result = requests.delete(
f'https://api.digitalocean.com/v2/droplets/{instance["InstanceId"]}/',
- headers={
- "Authorization": f'Bearer {self.token}'
- })
+ headers={"Authorization": f"Bearer {self.token}"},
+ )
- if result.status_code == 204 or 'not_found' in result.text:
- self.emitter.echo(
- f"\tdestroyed instance for {node_name}")
- del self.config['instances'][node_name]
+ if result.status_code == 204 or "not_found" in result.text:
+ self.emitter.echo(f"\tdestroyed instance for {node_name}")
+ del self.config["instances"][node_name]
self._write_config()
else:
raise Exception(
- f"Errors occurred while deleting node: {result.text}")
+ f"Errors occurred while deleting node: {result.text}"
+ )
return True
class AWSNodeConfigurator(BaseCloudNodeConfigurator):
-
URSULA_PORT = 9151
PROMETHEUS_PORTS = [9101]
OTHER_INGRESS_PORTS = [(9601, 9601), (3919, 3919)]
- provider_name = 'aws'
+ provider_name = "aws"
# Ubuntu AWS EC2 cloud images by region - https://cloud-images.ubuntu.com/locator/ec2/
EC2_AMI_LOOKUP = {
- 'us-west-2': 'ami-07f3835078238cf5f', # Oregon (previous 'ami-09dd2e08d601bff67')
- 'us-west-1': 'ami-060810abef0876bf9', # California (previous 'ami-021809d9177640a20')
- 'us-east-2': 'ami-05e2e289878082d62', # Ohio (previous 'ami-07efac79022b86107')
- 'us-east-1': 'ami-07a72d328538fc075', # N. Virginia (previous 'ami-0dba2cb6798deb6d8')
- 'ca-central-1': 'ami-00f9d48672cdfb082', # Canada (previous 'ami-092ae90a292e01141')
- 'eu-west-1': 'ami-013ee89145538ca58', # Ireland
- 'eu-west-2': 'ami-04cac1713d99a8a58', # London
- 'eu-west-3': 'ami-036d1e148d3009384', # Paris (previous 'ami-0c3be2097e1270c89')
- 'eu-north-1': 'ami-0c93e624d16d7d54b', # Stockholm
- 'eu-central-1': 'ami-08868ffb88a12d582', # Frankfurt (previous 'ami-0c960b947cbb2dd16')
- 'ap-northeast-1': 'ami-02a48cc8e65575754', # Tokyo (previous 'ami-09b86f9709b3c33d4')
- 'ap-northeast-2': 'ami-0be886bd314f8bd39', # Seoul
- 'ap-southeast-1': 'ami-09e450813d49ccb3d', # Singapore (previous 'ami-093da183b859d5a4b')
- 'ap-southeast-2': 'ami-02884b059f16723fb', # Sydney
- 'sa-east-1': 'ami-0ba6b2f8309cc9e14' # Sao Paolo (previous 'ami-090006f29ecb2d79a')
+ "us-west-2": "ami-07f3835078238cf5f", # Oregon (previous 'ami-09dd2e08d601bff67')
+ "us-west-1": "ami-060810abef0876bf9", # California (previous 'ami-021809d9177640a20')
+ "us-east-2": "ami-05e2e289878082d62", # Ohio (previous 'ami-07efac79022b86107')
+ "us-east-1": "ami-07a72d328538fc075", # N. Virginia (previous 'ami-0dba2cb6798deb6d8')
+ "ca-central-1": "ami-00f9d48672cdfb082", # Canada (previous 'ami-092ae90a292e01141')
+ "eu-west-1": "ami-013ee89145538ca58", # Ireland
+ "eu-west-2": "ami-04cac1713d99a8a58", # London
+ "eu-west-3": "ami-036d1e148d3009384", # Paris (previous 'ami-0c3be2097e1270c89')
+ "eu-north-1": "ami-0c93e624d16d7d54b", # Stockholm
+ "eu-central-1": "ami-08868ffb88a12d582", # Frankfurt (previous 'ami-0c960b947cbb2dd16')
+ "ap-northeast-1": "ami-02a48cc8e65575754", # Tokyo (previous 'ami-09b86f9709b3c33d4')
+ "ap-northeast-2": "ami-0be886bd314f8bd39", # Seoul
+ "ap-southeast-1": "ami-09e450813d49ccb3d", # Singapore (previous 'ami-093da183b859d5a4b')
+ "ap-southeast-2": "ami-02884b059f16723fb", # Sydney
+ "sa-east-1": "ami-0ba6b2f8309cc9e14", # Sao Paolo (previous 'ami-090006f29ecb2d79a')
}
- preferred_platform = 'ubuntu-focal' # unused
+ preferred_platform = "ubuntu-focal" # unused
@property
def _provider_deploy_attrs(self):
return [
- {'key': 'ansible_ssh_private_key_file',
- 'value': self.config['keypair_path']},
- {'key': 'default_user', 'value': 'ubuntu'}
+ {
+ "key": "ansible_ssh_private_key_file",
+ "value": self.config["keypair_path"],
+ },
+ {"key": "default_user", "value": "ubuntu"},
]
def _configure_provider_params(self):
-
# some attributes we will configure later
self.vpc = None
# find aws profiles on user's local environment
available_profiles = boto3.session.Session().available_profiles
- choice_list = '\n\t'.join(available_profiles)
+ choice_list = "\n\t".join(available_profiles)
- self.profile = self.config.get(
- 'aws-profile') or os.getenv('AWS_PROFILE')
+ self.profile = self.config.get("aws-profile") or os.getenv("AWS_PROFILE")
if not self.profile:
self.profile = self.emitter.prompt(
f"please select an AWS profile from the following options: \n{choice_list}",
type=self.emitter.Choice(available_profiles),
- show_choices=False
+ show_choices=False,
)
- self.emitter.echo(f'using profile: {self.profile}')
+ self.emitter.echo(f"using profile: {self.profile}")
- self.AWS_REGION = self.config.get('aws-region')
+ self.AWS_REGION = self.config.get("aws-region")
if not self.AWS_REGION:
- self.AWS_REGION = os.environ.get('AWS_DEFAULT_REGION')
+ self.AWS_REGION = os.environ.get("AWS_DEFAULT_REGION")
if self.AWS_REGION:
- use_region = self.emitter.prompt(
- f"No explicit region value defined; using region value from 'AWS_DEFAULT_REGION' environment variable: {self.AWS_REGION}. Continue? (type 'yes')") == "yes"
+ use_region = (
+ self.emitter.prompt(
+ f"No explicit region value defined; using region value from 'AWS_DEFAULT_REGION' environment variable: {self.AWS_REGION}. Continue? (type 'yes')"
+ )
+ == "yes"
+ )
if not use_region:
# prompt for region
self.AWS_REGION = None
if not self.AWS_REGION:
session = boto3.Session(profile_name=self.profile)
- ec2 = session.client('ec2')
- available_regions = [r['RegionName']
- for r in ec2.describe_regions()['Regions']]
- region_choice_list = '\n\t'.join(available_regions)
+ ec2 = session.client("ec2")
+ available_regions = [
+ r["RegionName"] for r in ec2.describe_regions()["Regions"]
+ ]
+ region_choice_list = "\n\t".join(available_regions)
self.AWS_REGION = self.emitter.prompt(
f"please select an AWS region from the following regions: {region_choice_list}\n",
type=self.emitter.Choice(available_regions),
- show_choices=False
+ show_choices=False,
)
# init the session with a region specified
self.session = boto3.Session(
- profile_name=self.profile, region_name=self.AWS_REGION)
- self.ec2Client = ec2 = self.session.client('ec2')
+ profile_name=self.profile, region_name=self.AWS_REGION
+ )
+ self.ec2Client = ec2 = self.session.client("ec2")
- self.EC2_INSTANCE_SIZE = self.kwargs.get('instance_type')
+ self.EC2_INSTANCE_SIZE = self.kwargs.get("instance_type")
- if self.action == 'create':
+ if self.action == "create":
if self.EC2_INSTANCE_SIZE is None:
instance_types = ec2.describe_instance_type_offerings(
# LocationType = 'region',
Filters=[
- {'Values': ['t1.*', 't2.*', 't3.*',
- 't3a.*'], 'Name': 'instance-type'},
+ {
+ "Values": ["t1.*", "t2.*", "t3.*", "t3a.*"],
+ "Name": "instance-type",
+ },
# {'Values': [self.AWS_REGION], 'Name': 'location'}
]
- )['InstanceTypeOfferings']
+ )["InstanceTypeOfferings"]
instance_type_choices = sorted(
- [r['InstanceType'] for r in instance_types])
- instance_type_choice_list = '\n\t'.join(instance_type_choices)
+ [r["InstanceType"] for r in instance_types]
+ )
+ instance_type_choice_list = "\n\t".join(instance_type_choices)
self.EC2_INSTANCE_SIZE = self.emitter.prompt(
f"please select an instance type from the following choices:\n\t{instance_type_choice_list}\n",
type=self.emitter.Choice(instance_type_choices),
- show_choices=False
+ show_choices=False,
)
self.emitter.echo(f"Instance type: {self.EC2_INSTANCE_SIZE}")
self.emitter.echo(f"AWS Region: {self.AWS_REGION}")
- self.ec2Client = self.session.client('ec2')
- self.ec2Resource = self.session.resource('ec2')
+ self.ec2Client = self.session.client("ec2")
+ self.ec2Resource = self.session.resource("ec2")
- self.config['aws-profile'] = self.profile
+ self.config["aws-profile"] = self.profile
- self.keypair = self.config.get('keypair')
+ self.keypair = self.config.get("keypair")
if not self.keypair:
self.keypair, keypair_path = self._create_keypair()
- self.config['keypair_path'] = str(keypair_path)
+ self.config["keypair_path"] = str(keypair_path)
- self.config['aws-profile'] = self.profile
- self.config['keypair'] = self.keypair
- self.config['aws-region'] = self.AWS_REGION
+ self.config["aws-profile"] = self.profile
+ self.config["keypair"] = self.keypair
+ self.config["aws-region"] = self.AWS_REGION
self._write_config()
@property
@@ -1371,29 +1513,35 @@ def aws_tags(self):
def _create_keypair(self):
new_keypair_data = self.ec2Client.create_key_pair(
- KeyName=f'{self.namespace_network}')
- out_path = DEFAULT_CONFIG_ROOT / NODE_CONFIG_STORAGE_KEY / \
- f'{self.namespace_network}.awskeypair'
+ KeyName=f"{self.namespace_network}"
+ )
+ out_path = (
+ DEFAULT_CONFIG_ROOT
+ / NODE_CONFIG_STORAGE_KEY
+ / f"{self.namespace_network}.awskeypair"
+ )
out_path.parent.mkdir(parents=True, exist_ok=True)
- with open(out_path, 'w') as outfile:
- outfile.write(new_keypair_data['KeyMaterial'])
+ with open(out_path, "w") as outfile:
+ outfile.write(new_keypair_data["KeyMaterial"])
# set local keypair permissions https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html
out_path.chmod(0o400)
self.emitter.echo(
- f"a new aws keypair was saved to {out_path}, keep it safe.", color='yellow')
- return new_keypair_data['KeyName'], out_path
+ f"a new aws keypair was saved to {out_path}, keep it safe.", color="yellow"
+ )
+ return new_keypair_data["KeyName"], out_path
def _delete_keypair(self):
# only use self.namespace here to avoid accidental deletions of pre-existing keypairs
deleted_keypair_data = self.ec2Client.delete_key_pair(
- KeyName=f'{self.namespace_network}')
- if deleted_keypair_data['HTTPStatusCode'] == 200:
+ KeyName=f"{self.namespace_network}"
+ )
+ if deleted_keypair_data["HTTPStatusCode"] == 200:
outpath = Path(DEFAULT_CONFIG_ROOT).joinpath(
- NODE_CONFIG_STORAGE_KEY, f'{self.namespace_network}.awskeypair')
+ NODE_CONFIG_STORAGE_KEY, f"{self.namespace_network}.awskeypair"
+ )
outpath.unlink()
- self.emitter.echo(
- f"keypair at {outpath}, was deleted", color='yellow')
+ self.emitter.echo(f"keypair at {outpath}, was deleted", color="yellow")
def _ensure_vpc(self):
"""creates an aws virtual private cloud if one doesn't exist"""
@@ -1402,101 +1550,123 @@ def _ensure_vpc(self):
from botocore import exceptions as botoexceptions
except ImportError:
self.emitter.echo(
- "You need to have boto3 installed to use this feature (pip3 install boto3)")
+ "You need to have boto3 installed to use this feature (pip3 install boto3)"
+ )
return
if not self.vpc:
- vpc_id = self.config.get('Vpc')
+ vpc_id = self.config.get("Vpc")
if vpc_id:
self.vpc = self.ec2Resource.Vpc(vpc_id)
else:
try:
- vpcdata = self.ec2Client.create_vpc(
- CidrBlock='172.16.0.0/16')
+ vpcdata = self.ec2Client.create_vpc(CidrBlock="172.16.0.0/16")
except botoexceptions.NoCredentialsError:
raise ValueError(
- f'Could create AWS resource with profile "{self.profile}" and keypair "{self.keypair}", please run this command with --aws-profile and --aws-keypair to specify matching aws credentials')
- self.vpc = self.ec2Resource.Vpc(vpcdata['Vpc']['VpcId'])
+ f'Could not create AWS resource with profile "{self.profile}" and keypair "{self.keypair}", '
+ 'please run this command with --aws-profile and --aws-keypair to specify matching aws credentials'
+ )
+ self.vpc = self.ec2Resource.Vpc(vpcdata["Vpc"]["VpcId"])
self.vpc.wait_until_available()
self.vpc.create_tags(Tags=self.aws_tags)
- self.vpc.modify_attribute(EnableDnsSupport={'Value': True})
- self.vpc.modify_attribute(EnableDnsHostnames={'Value': True})
- self.config['Vpc'] = vpc_id = self.vpc.id
+ self.vpc.modify_attribute(EnableDnsSupport={"Value": True})
+ self.vpc.modify_attribute(EnableDnsHostnames={"Value": True})
+ self.config["Vpc"] = vpc_id = self.vpc.id
self._write_config()
return self.vpc
def _configure_path_to_internet(self):
"""
- create and configure all the little AWS bits we need to get an internet request
- from the internet to our node and back
+ create and configure all the little AWS bits we need to get an internet request
+ from the internet to our node and back
"""
- if not self.config.get('InternetGateway'):
+ if not self.config.get("InternetGateway"):
gatewaydata = self.ec2Client.create_internet_gateway()
- self.config['InternetGateway'] = gateway_id = gatewaydata['InternetGateway']['InternetGatewayId']
+ self.config["InternetGateway"] = gatewaydata[
+ "InternetGateway"
+ ]["InternetGatewayId"]
# tag it
self._write_config()
self.ec2Resource.InternetGateway(
- self.config['InternetGateway']).create_tags(Tags=self.aws_tags)
+ self.config["InternetGateway"]
+ ).create_tags(Tags=self.aws_tags)
self.vpc.attach_internet_gateway(
- InternetGatewayId=self.config['InternetGateway'])
+ InternetGatewayId=self.config["InternetGateway"]
+ )
- routetable_id = self.config.get('RouteTable')
+ routetable_id = self.config.get("RouteTable")
if not routetable_id:
routetable = self.vpc.create_route_table()
- self.config['RouteTable'] = routetable_id = routetable.id
+ self.config["RouteTable"] = routetable_id = routetable.id
self._write_config()
routetable.create_tags(Tags=self.aws_tags)
routetable = self.ec2Resource.RouteTable(routetable_id)
routetable.create_route(
- DestinationCidrBlock='0.0.0.0/0', GatewayId=self.config['InternetGateway'])
+ DestinationCidrBlock="0.0.0.0/0", GatewayId=self.config["InternetGateway"]
+ )
- if not self.config.get('Subnet'):
+ if not self.config.get("Subnet"):
subnetdata = self.ec2Client.create_subnet(
- CidrBlock='172.16.1.0/24', VpcId=self.vpc.id)
- self.config['Subnet'] = subnet_id = subnetdata['Subnet']['SubnetId']
+ CidrBlock="172.16.1.0/24", VpcId=self.vpc.id
+ )
+ self.config["Subnet"] = subnet_id = subnetdata["Subnet"]["SubnetId"]
self._write_config()
self.ec2Resource.Subnet(subnet_id).create_tags(Tags=self.aws_tags)
- routetable.associate_with_subnet(SubnetId=self.config['Subnet'])
+ routetable.associate_with_subnet(SubnetId=self.config["Subnet"])
- if self.config.get('SecurityGroup'):
- self.emitter.echo(f"SecurityGroup already exists for {self.namespace_network}; skipping port ingress configuration")
+ if self.config.get("SecurityGroup"):
+ self.emitter.echo(
+ f"SecurityGroup already exists for {self.namespace_network}; skipping port ingress configuration"
+ )
return
else:
securitygroupdata = self.ec2Client.create_security_group(
- GroupName=f'NuOps-{self.namespace_network}', Description='ssh and other ports', VpcId=self.config['Vpc'])
- self.config['SecurityGroup'] = sg_id = securitygroupdata['GroupId']
+ GroupName=f"NuOps-{self.namespace_network}",
+ Description="ssh and other ports",
+ VpcId=self.config["Vpc"],
+ )
+ self.config["SecurityGroup"] = securitygroupdata["GroupId"]
self._write_config()
- securitygroup = self.ec2Resource.SecurityGroup(
- self.config['SecurityGroup'])
+ securitygroup = self.ec2Resource.SecurityGroup(self.config["SecurityGroup"])
securitygroup.create_tags(Tags=self.aws_tags)
# TODO configure security group based on application (ursula or tbtc); for now all ports for ursula / tbtc are opened when security group was created
securitygroup.authorize_ingress(
- CidrIp='0.0.0.0/0', IpProtocol='tcp', FromPort=22, ToPort=22)
+ CidrIp="0.0.0.0/0", IpProtocol="tcp", FromPort=22, ToPort=22
+ )
# TODO: is it always 9151? Does that matter? Should this be configurable?
securitygroup.authorize_ingress(
- CidrIp='0.0.0.0/0', IpProtocol='tcp', FromPort=self.URSULA_PORT, ToPort=self.URSULA_PORT)
+ CidrIp="0.0.0.0/0",
+ IpProtocol="tcp",
+ FromPort=self.URSULA_PORT,
+ ToPort=self.URSULA_PORT,
+ )
for port in self.PROMETHEUS_PORTS:
securitygroup.authorize_ingress(
- CidrIp='0.0.0.0/0', IpProtocol='tcp', FromPort=port, ToPort=port)
+ CidrIp="0.0.0.0/0", IpProtocol="tcp", FromPort=port, ToPort=port
+ )
- for (source, dest) in self.OTHER_INGRESS_PORTS:
+ for source, dest in self.OTHER_INGRESS_PORTS:
securitygroup.authorize_ingress(
- CidrIp='0.0.0.0/0', IpProtocol='tcp', FromPort=source, ToPort=dest)
+ CidrIp="0.0.0.0/0", IpProtocol="tcp", FromPort=source, ToPort=dest
+ )
def _do_setup_for_instance_creation(self):
- if not getattr(self, 'profile', None):
+ if not getattr(self, "profile", None):
self.emitter.echo(
- "Aws nodes can only be created with an aws profile. (https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html)", color='red')
+ "Aws nodes can only be created with an aws profile. (https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html)",
+ color="red",
+ )
raise AttributeError("AWS profile not configured.")
self.emitter.echo(
- "ensuring that prerequisite cloud resources exist for instance creation.")
+ "ensuring that prerequisite cloud resources exist for instance creation."
+ )
self._ensure_vpc()
self._configure_path_to_internet()
self.emitter.echo("all prerequisite cloud resources do exist.")
@@ -1506,165 +1676,178 @@ def _destroy_resources(self, node_names):
from botocore import exceptions as botoexceptions
except ImportError:
self.emitter.echo(
- "You need to have boto3 installed to use this feature (pip3 install boto3)")
+ "You need to have boto3 installed to use this feature (pip3 install boto3)"
+ )
return
- existing_instances = {k: v for k, v in self.config.get(
- 'instances', {}).items() if k in node_names}
- vpc = self.ec2Resource.Vpc(self.config['Vpc'])
+ existing_instances = {
+ k: v for k, v in self.config.get("instances", {}).items() if k in node_names
+ }
+ vpc = self.ec2Resource.Vpc(self.config["Vpc"])
if existing_instances:
for node_name, instance in existing_instances.items():
- if node_names and not node_name in node_name:
+ if node_names and node_name not in node_names:
continue
self.emitter.echo(
- f"deleting worker instance for {node_name} in 3 seconds...", color='red')
+ f"deleting worker instance for {node_name} in 3 seconds...",
+ color="red",
+ )
time.sleep(3)
- self.ec2Resource.Instance(instance['InstanceId']).terminate()
- del self.config['instances'][node_name]
+ self.ec2Resource.Instance(instance["InstanceId"]).terminate()
+ del self.config["instances"][node_name]
self.emitter.echo(f"\tdestroyed instance for {node_name}")
self._write_config()
if not len(self.get_provider_hosts()):
self.emitter.echo("waiting for instance termination...")
time.sleep(10)
- for subresource in ['Subnet', 'RouteTable', 'SecurityGroup']:
+ for subresource in ["Subnet", "RouteTable", "SecurityGroup"]:
tries = 0
while self.config.get(subresource) and tries < 10:
- self.emitter.echo(f'deleting {subresource}')
+ self.emitter.echo(f"deleting {subresource}")
try:
getattr(self.ec2Resource, subresource)(
- self.config[subresource]).delete()
+ self.config[subresource]
+ ).delete()
self.emitter.echo(
- f'deleted {subresource}: {self.config[subresource]}')
+ f"deleted {subresource}: {self.config[subresource]}"
+ )
del self.config[subresource]
self._write_config()
except botoexceptions.ClientError as e:
tries += 1
self.emitter.echo(
- f'failed to delete {subresource}, because: {e}.. trying again in 10...', color="yellow")
+ f"failed to delete {subresource}, because: {e}.. trying again in 10...",
+ color="yellow",
+ )
time.sleep(10)
if tries > 10:
self.emitter.echo(
- "some resources could not be deleted because AWS is taking awhile to delete things. Run this command again in a minute or so...", color="yellow")
+ "some resources could not be deleted because AWS is taking awhile to delete things. Run this command again in a minute or so...",
+ color="yellow",
+ )
return False
- if self.config.get('InternetGateway'):
+ if self.config.get("InternetGateway"):
self.ec2Resource.InternetGateway(
- self.config['InternetGateway']).detach_from_vpc(VpcId=self.config['Vpc'])
+ self.config["InternetGateway"]
+ ).detach_from_vpc(VpcId=self.config["Vpc"])
self.ec2Resource.InternetGateway(
- self.config['InternetGateway']).delete()
+ self.config["InternetGateway"]
+ ).delete()
self.emitter.echo(
- f'deleted InternetGateway: {self.config["InternetGateway"]}')
- del self.config['InternetGateway']
+ f'deleted InternetGateway: {self.config["InternetGateway"]}'
+ )
+ del self.config["InternetGateway"]
self._write_config()
- if self.config.get('Vpc'):
+ if self.config.get("Vpc"):
vpc.delete()
self.emitter.echo(f'deleted Vpc: {self.config["Vpc"]}')
- del self.config['Vpc']
+ del self.config["Vpc"]
self._write_config()
- if self.config.get('keypair'):
+ if self.config.get("keypair"):
self.emitter.echo(
- f'deleting keypair {self.keypair} in 5 seconds...', color='red')
+ f"deleting keypair {self.keypair} in 5 seconds...", color="red"
+ )
time.sleep(6)
- self.ec2Client.delete_key_pair(
- KeyName=self.config.get('keypair'))
- del self.config['keypair']
- Path(self.config['keypair_path']).unlink()
- del self.config['keypair_path']
+ self.ec2Client.delete_key_pair(KeyName=self.config.get("keypair"))
+ del self.config["keypair"]
+ Path(self.config["keypair_path"]).unlink()
+ del self.config["keypair_path"]
self._write_config()
return True
def create_new_node(self, node_name):
-
- if not self.EC2_AMI_LOOKUP.get(self.AWS_REGION) or os.environ.get('NUCYPHER_OPS_AWS_AMI'):
+ if not self.EC2_AMI_LOOKUP.get(self.AWS_REGION) and not os.environ.get(
+ "NUCYPHER_OPS_AWS_AMI"
+ ):
raise AttributeError(
- "Sorry nucypher-ops does not automatically support this region. Please specify an ami for your instances by setting envar `export NUCYPHER_OPS_AWS_AMI=ami-xxxxxxxxxxxxxx`")
+ "Sorry nucypher-ops does not automatically support this region. "
+ "Please specify an ami for your instances by setting envar `export NUCYPHER_OPS_AWS_AMI=ami-xxxxxxxxxxxxxx`"
+ )
params = dict(
- ImageId=os.environ.get(
- 'NUCYPHER_OPS_AWS_AMI') or self.EC2_AMI_LOOKUP.get(self.AWS_REGION),
+ ImageId=os.environ.get("NUCYPHER_OPS_AWS_AMI")
+ or self.EC2_AMI_LOOKUP.get(self.AWS_REGION),
InstanceType=self.EC2_INSTANCE_SIZE,
KeyName=self.keypair,
NetworkInterfaces=[
{
- 'AssociatePublicIpAddress': True,
- 'DeleteOnTermination': True,
- 'DeviceIndex': 0,
- 'Groups': [
- self.config['SecurityGroup']
- ],
- 'SubnetId': self.config['Subnet'],
+ "AssociatePublicIpAddress": True,
+ "DeleteOnTermination": True,
+ "DeviceIndex": 0,
+ "Groups": [self.config["SecurityGroup"]],
+ "SubnetId": self.config["Subnet"],
},
],
TagSpecifications=[
{
- 'ResourceType': 'instance',
- 'Tags': [
- {
- 'Key': 'Name',
- 'Value': f'{node_name}'
- },
- ]
+ "ResourceType": "instance",
+ "Tags": [
+ {"Key": "Name", "Value": f"{node_name}"},
+ ],
},
],
)
new_instance_data = self.ec2Client.run_instances(
- MaxCount=1,
- MinCount=1,
- **params
+ MaxCount=1, MinCount=1, **params
)
- node_data = {
- 'InstanceId': new_instance_data['Instances'][0]['InstanceId']}
+ node_data = {"InstanceId": new_instance_data["Instances"][0]["InstanceId"]}
instance = self.ec2Resource.Instance(
- new_instance_data['Instances'][0]['InstanceId'])
+ new_instance_data["Instances"][0]["InstanceId"]
+ )
self.emitter.echo("\twaiting for instance to come online...")
instance.wait_until_running()
instance.load()
- node_data['publicaddress'] = instance.public_ip_address
- node_data['provider_deploy_attrs'] = self._provider_deploy_attrs
+ node_data["publicaddress"] = instance.public_ip_address
+ node_data["provider_deploy_attrs"] = self._provider_deploy_attrs
return node_data
class GenericConfigurator(BaseCloudNodeConfigurator):
-
- provider_name = 'generic'
+ provider_name = "generic"
def _write_config(self):
-
- if not self.config_path.exists() and self.action not in self.NAMESSPACE_CREATE_ACTIONS:
+ if (
+ not self.config_path.exists()
+ and self.action not in self.NAMESSPACE_CREATE_ACTIONS
+ ):
raise AttributeError(
- f"Namespace/config '{self.namespace}' does not exist. Show existing namespaces: `nucypher nodes list-namespaces` or create a namespace: `nucypher nodes create`")
+ f"Namespace/config '{self.namespace}' does not exist. Show existing namespaces: `nucypher nodes list-namespaces` or create a namespace: `nucypher nodes create`"
+ )
super()._write_config()
def create_nodes(self, node_names, host_address, login_name, key_path, ssh_port):
- if not self.config.get('instances'):
- self.config['instances'] = {}
+ if not self.config.get("instances"):
+ self.config["instances"] = {}
for node_name in node_names:
- node_data = self.config['instances'].get(node_name, {})
+ node_data = self.config["instances"].get(node_name, {})
if node_data:
self.emitter.echo(
- f"Host info already exists for staker {node_name}; Updating and proceeding.", color="yellow")
+ f"Host info already exists for staker {node_name}; Updating and proceeding.",
+ color="yellow",
+ )
time.sleep(3)
- node_data['publicaddress'] = host_address
- node_data['provider'] = self.provider_name
- node_data['index'] = len(self.config['instances'].keys())
- node_data['provider_deploy_attrs'] = [
- {'key': 'ansible_ssh_private_key_file', 'value': key_path},
- {'key': 'default_user', 'value': login_name},
- {'key': 'ansible_port', 'value': ssh_port}
+ node_data["publicaddress"] = host_address
+ node_data["provider"] = self.provider_name
+ node_data["index"] = len(self.config["instances"].keys())
+ node_data["provider_deploy_attrs"] = [
+ {"key": "ansible_ssh_private_key_file", "value": key_path},
+ {"key": "default_user", "value": login_name},
+ {"key": "ansible_port", "value": ssh_port},
]
- self.config['instances'][node_name] = node_data
- if self.config['seed_network'] and not self.config.get('seed_node'):
- self.config['seed_node'] = node_data['publicaddress']
+ self.config["instances"][node_name] = node_data
+ if self.config["seed_network"] and not self.config.get("seed_node"):
+ self.config["seed_node"] = node_data["publicaddress"]
self._write_config()
self.created_new_nodes = True
@@ -1672,25 +1855,23 @@ def create_nodes(self, node_names, host_address, login_name, key_path, ssh_port)
class GenericDeployer(BaseCloudNodeConfigurator):
-
def deploy(self, node_names):
-
playbook = Path(PLAYBOOKS).joinpath(self.playbook_name)
self.configure_host_level_overrides(node_names)
self.update_generate_inventory(node_names)
for k in node_names:
- installed = self.config['instances'][k].get('installed', [])
+ installed = self.config["instances"][k].get("installed", [])
installed = list(set(installed + [self.application]))
- self.config['instances'][k]['installed'] = installed
+ self.config["instances"][k]["installed"] = installed
self._write_config()
loader = DataLoader()
- inventory = InventoryManager(
- loader=loader, sources=self.inventory_path)
+ inventory = InventoryManager(loader=loader, sources=self.inventory_path)
callback = AnsiblePlayBookResultsCollector(
- sock=self.emitter, return_results=self.output_capture)
+ sock=self.emitter, return_results=self.output_capture
+ )
variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
@@ -1704,27 +1885,29 @@ def deploy(self, node_names):
executor.run()
for k in node_names:
- installed = self.config['instances'][k].get('installed', [])
+ installed = self.config["instances"][k].get("installed", [])
installed = list(set(installed + [self.application]))
- self.config['instances'][k]['installed'] = installed
+ self.config["instances"][k]["installed"] = installed
self._write_config()
self.update_captured_instance_data(self.output_capture)
self.give_helpful_hints(node_names, backup=True, playbook=playbook)
def update_generate_inventory(self, node_names, **kwargs):
-
# filter out the nodes we will not be dealing with
- nodes = {key: value for key,
- value in self.config['instances'].items() if key in node_names}
+ nodes = {
+ key: value
+ for key, value in self.config["instances"].items()
+ if key in node_names
+ }
if not nodes:
raise KeyError(
- f"No hosts matched the supplied names: {node_names}. Try `nucypher-ops nodes list` or create new hosts with `nucypher-ops nodes create`")
+ f"No hosts matched the supplied names: {node_names}. Try `nucypher-ops nodes list` or create new hosts with `nucypher-ops nodes create`"
+ )
defaults = self.default_config()
- for datatype in ['envvars', 'cliargs']:
-
- data_key = f'runtime_{datatype}'
+ for datatype in ["envvars", "cliargs"]:
+ data_key = f"runtime_{datatype}"
input_data = [(k, v) for k, v in getattr(self, datatype)]
@@ -1738,14 +1921,14 @@ def update_generate_inventory(self, node_names, **kwargs):
# we want to update the config with the specified values
# so they will persist in future invocations
- self.config['instances'][key] = copy.deepcopy(nodes[key])
+ self.config["instances"][key] = copy.deepcopy(nodes[key])
# we don't want to save the default_envvars to the config file
# but we do want them to be specified to the inventory template
# but overridden on a per node basis if previously specified
for key, node in nodes.items():
for k, v in defaults[datatype]:
- if not k in nodes[key][data_key]:
+ if k not in nodes[key][data_key]:
nodes[key][data_key][k] = v
for key, node in nodes.items():
@@ -1757,7 +1940,7 @@ def update_generate_inventory(self, node_names, **kwargs):
extra=kwargs,
)
- with open(self.inventory_path, 'w') as outfile:
+ with open(self.inventory_path, "w") as outfile:
outfile.write(inventory_content)
self.emitter.echo(f"wrote new inventory to: {self.inventory_path}")
@@ -1768,86 +1951,95 @@ def update_generate_inventory(self, node_names, **kwargs):
class PorterDeployer(GenericDeployer):
-
- playbook_name = 'setup_porter.yml'
- application = 'porter'
+ playbook_name = "setup_porter.yml"
+ application = "porter"
required_fields = [
- 'eth_provider',
- 'docker_image',
+ "eth_provider",
+ "docker_image",
]
host_level_override_prompts = {
- 'eth_provider': {"prompt": "--eth-provider: please provide the url of a hosted ethereum node (infura/geth) which this porter node can access", "choices": None},
+ "eth_provider": {
+ "prompt": "--eth-provider: please provide the url of a hosted ethereum node (infura/geth) which this porter node can access",
+ "choices": None,
+ },
}
output_capture = {}
@property
def user(self) -> str:
- return 'porter'
+ return "porter"
def default_config(self):
- defaults = {
- 'envvars': [],
- 'cliargs': []
- }
+ defaults = {"envvars": [], "cliargs": []}
return defaults
@property
def _inventory_template(self):
- template_path = Path(TEMPLATES).joinpath('porter_inventory.mako')
+ template_path = Path(TEMPLATES).joinpath("porter_inventory.mako")
return Template(filename=str(template_path))
@property
def inventory_path(self):
- return str(Path(DEFAULT_CONFIG_ROOT).joinpath(NODE_CONFIG_STORAGE_KEY, f'{self.namespace_network}.porter_ansible_inventory.yml'))
+ return str(
+ Path(DEFAULT_CONFIG_ROOT).joinpath(
+ NODE_CONFIG_STORAGE_KEY,
+ f"{self.namespace_network}.porter_ansible_inventory.yml",
+ )
+ )
class tBTCv2Deployer(GenericDeployer):
-
- application = 'tbtcv2'
+ application = "tbtcv2"
required_fields = [
- 'eth_provider',
- 'docker_image',
+ "eth_provider",
+ "docker_image",
]
host_level_override_prompts = {
- 'eth_provider': {"prompt": "--eth-provider: please provide the websocket url of an ethereum node", "choices": None},
+ "eth_provider": {
+ "prompt": "--eth-provider: please provide the websocket url of an ethereum node",
+ "choices": None,
+ },
}
output_capture = {}
@property
def user(self) -> str:
- return 'tbtcv2'
+ return "tbtcv2"
def default_config(self):
defaults = {
- 'envvars':
- [
- ("OPERATOR_ETHEREUM_PASSWORD",
- self.config['ethpassword']),
- ],
- 'cliargs': []
+ "envvars": [
+ ("OPERATOR_ETHEREUM_PASSWORD", self.config["ethpassword"]),
+ ],
+ "cliargs": [],
}
return defaults
@property
def _inventory_template(self):
- template_path = Path(TEMPLATES).joinpath('tbtcv2_inventory.mako')
+ template_path = Path(TEMPLATES).joinpath("tbtcv2_inventory.mako")
return Template(filename=str(template_path))
@property
def inventory_path(self):
- return str(Path(DEFAULT_CONFIG_ROOT).joinpath(NODE_CONFIG_STORAGE_KEY, f'{self.namespace_network}.tbtcv2_ansible_inventory.yml'))
+ return str(
+ Path(DEFAULT_CONFIG_ROOT).joinpath(
+ NODE_CONFIG_STORAGE_KEY,
+ f"{self.namespace_network}.tbtcv2_ansible_inventory.yml",
+ )
+ )
@property
def backup_directory(self):
- return f'{self.config_dir}/remote_operator_backups/'
+ return f"{self.config_dir}/remote_operator_backups/"
def stage_nodes(self, *args, **kwargs):
self.playbook_name = "stage_tbtcv2.yml"
try:
self.output_capture = {
- 'operator address': [],
+ "operator address": [],
}
return super().deploy(*args, **kwargs)
finally:
@@ -1868,15 +2060,14 @@ def get_operator_address(self, *args, **kwargs):
def _format_runtime_options(self, node_options):
# override function to not automatically include `--network `
- return ' '.join([f'--{name} {value}' for name, value in node_options.items()])
+ return " ".join([f"--{name} {value}" for name, value in node_options.items()])
class EthDeployer(GenericDeployer):
-
- playbook_name = 'setup_standalone_geth_node.yml'
- application = 'ethereum'
+ playbook_name = "setup_standalone_geth_node.yml"
+ application = "ethereum"
required_fields = [
- 'docker_image',
+ "docker_image",
]
host_level_override_prompts = {
# 'eth_provider': {"prompt": "--eth-provider: please provide the url of a hosted ethereum node (infura/geth) which this porter node can access", "choices": None},
@@ -1886,20 +2077,24 @@ class EthDeployer(GenericDeployer):
@property
def _inventory_template(self):
- template_path = Path(TEMPLATES).joinpath('ethereum_inventory.mako')
+ template_path = Path(TEMPLATES).joinpath("ethereum_inventory.mako")
return Template(filename=str(template_path))
@property
def inventory_path(self):
- return str(Path(DEFAULT_CONFIG_ROOT).joinpath(NODE_CONFIG_STORAGE_KEY, f'{self.namespace_network}.ethereum_ansible_inventory.yml'))
+ return str(
+ Path(DEFAULT_CONFIG_ROOT).joinpath(
+ NODE_CONFIG_STORAGE_KEY,
+ f"{self.namespace_network}.ethereum_ansible_inventory.yml",
+ )
+ )
@property
def user(self) -> str:
- return 'ethereum'
+ return "ethereum"
class CloudDeployers:
-
aws = AWSNodeConfigurator
digitalocean = DigitalOceanConfigurator
generic = GenericConfigurator
diff --git a/nucypher_ops/ops/keygen.py b/nucypher_ops/ops/keygen.py
index ddef4e8..55544ce 100644
--- a/nucypher_ops/ops/keygen.py
+++ b/nucypher_ops/ops/keygen.py
@@ -17,7 +17,6 @@
from getpass import getpass
-from hdwallet.cryptocurrencies import EthereumMainnet
from hdwallet.hdwallet import HDWallet
from hdwallet.utils import generate_entropy
@@ -32,14 +31,10 @@
def generate(prompt=False):
if passphrase := prompt:
- passphrase = getpass('Enter passphrase (optional): ')
+ passphrase = getpass("Enter passphrase (optional): ")
entropy: str = generate_entropy(strength=STRENGTH)
hdwallet = HDWallet()
- hdwallet.from_entropy(
- entropy=entropy,
- language=LANGUAGE,
- passphrase=passphrase
- )
+ hdwallet.from_entropy(entropy=entropy, language=LANGUAGE, passphrase=passphrase)
return hdwallet
@@ -52,18 +47,13 @@ def derive(wallet: HDWallet, quantity: int = ACCOUNTS):
def restore(words: str, prompt=False):
if passphrase := prompt:
- passphrase = getpass('Enter passphrase (optional): ')
+ passphrase = getpass("Enter passphrase (optional): ")
wallet = HDWallet()
- wallet.from_mnemonic(
- mnemonic=words,
- language=LANGUAGE,
- passphrase=passphrase
- )
+ wallet.from_mnemonic(mnemonic=words, language=LANGUAGE, passphrase=passphrase)
return wallet
if __name__ == "__main__":
-
# Generate
wallet = generate()
print(wallet.mnemonic())
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..5f688f7
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,7 @@
+[build-system]
+requires = ["setuptools", "wheel"]
+
+[tool.ruff]
+exclude = ["nucypher_ops/__init__.py"] # false positives
+select = ["E", "F", "I"]
+line-length = 180
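For reference, the selected families map to pycodestyle errors ("E"), Pyflakes ("F") and isort-style import ordering ("I"), and "select" replaces ruff's default rule set rather than extending it; with line-length set to 180, E501 only fires on lines longer than that. A minimal, hypothetical sketch (not code from this repository) of the kind of findings each family reports:

    import sys                                             # F401: `sys` imported but unused
    from nucypher_ops.constants import DEFAULT_NAMESPACE
    import click                                           # I001: import block is un-sorted (third-party after first-party)

    def check(namespace):
        if namespace == None:                              # E711: comparison to None; prefer `namespace is None`
            namespace = DEFAULT_NAMESPACE
        click.echo(namespace)

Families outside this selection (for example the pycodestyle warnings under "W") are not checked by this configuration.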
diff --git a/setup.py b/setup.py
index af8ae9f..8c0ebdf 100644
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,7 @@
from typing import Dict
-PACKAGE_NAME = 'nucypher_ops'
+PACKAGE_NAME = "nucypher_ops"
BASE_DIR = Path(__file__).parent
ABOUT: Dict[str, str] = dict()
@@ -28,32 +28,32 @@
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
- "Topic :: Security"
+ "Topic :: Security",
]
setup(
packages=find_packages(),
include_package_data=True,
- name=ABOUT['__title__'],
- url=ABOUT['__url__'],
- version=ABOUT['__version__'],
- author=ABOUT['__author__'],
- author_email=ABOUT['__email__'],
- description=ABOUT['__summary__'],
- license=ABOUT['__license__'],
+ name=ABOUT["__title__"],
+ url=ABOUT["__url__"],
+ version=ABOUT["__version__"],
+ author=ABOUT["__author__"],
+ author_email=ABOUT["__email__"],
+ description=ABOUT["__summary__"],
+ license=ABOUT["__license__"],
install_requires=[
- 'click',
- 'colorama',
- 'ansible',
- 'hdwallet',
- 'mako',
- 'requests',
- 'maya',
- 'appdirs',
- 'tabulate',
+ "click",
+ "colorama",
+ "ansible",
+ "hdwallet",
+ "mako",
+ "requests",
+ "maya",
+ "appdirs",
+ "tabulate",
],
- entry_points='''
+ entry_points="""
[console_scripts]
nucypher-ops=nucypher_ops.cli.main:index
- ''',
+ """,
)
From 772f5a52b1d8edbba4a4e2d31e3df79d1d7c2214 Mon Sep 17 00:00:00 2001
From: piotr-roslaniec <39299780+piotr-roslaniec@users.noreply.github.com>
Date: Thu, 30 Nov 2023 16:12:01 +0100
Subject: [PATCH 2/4] Update .github/workflows/python.yml
Co-authored-by: Derek Pierre
Update .github/workflows/python.yml
Co-authored-by: Derek Pierre
Update nucypher_ops/__about__.py
Co-authored-by: Derek Pierre
Update nucypher_ops/__about__.py
Co-authored-by: Derek Pierre
---
.github/workflows/python.yml | 4 ++++
nucypher_ops/__about__.py | 4 ++--
2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml
index 226bf31..4d3c0a0 100644
--- a/.github/workflows/python.yml
+++ b/.github/workflows/python.yml
@@ -12,6 +12,10 @@ jobs:
steps:
- uses: actions/checkout@v4
+<<<<<<< HEAD
+=======
+
+>>>>>>> cced0c2 (Update .github/workflows/python.yml)
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
diff --git a/nucypher_ops/__about__.py b/nucypher_ops/__about__.py
index b80fac7..4c0c302 100644
--- a/nucypher_ops/__about__.py
+++ b/nucypher_ops/__about__.py
@@ -2,7 +2,7 @@
__url__ = "https://github.com/nucypher/nucypher-ops"
-__summary__ = "Install and management tools for a proxy re-encryption network to empower privacy in decentralized systems."
+__summary__ = "Node management tools for nodes on the Threshold Network to empower privacy in decentralized systems."
__version__ = "0.12.0"
@@ -12,4 +12,4 @@
__license__ = "GNU Affero General Public License, Version 3"
-__copyright__ = "Copyright (C) 2022 NuCypher"
+__copyright__ = "Copyright (C) 2023 NuCypher"
From a0e95e39bb21de5f8cb7d631b5ba57c5771dc853 Mon Sep 17 00:00:00 2001
From: Piotr Roslaniec
Date: Thu, 30 Nov 2023 16:17:55 +0100
Subject: [PATCH 3/4] apply pr suggestions
---
.github/workflows/python.yml | 24 ++++++++++++++++++------
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml
index 4d3c0a0..eacf865 100644
--- a/.github/workflows/python.yml
+++ b/.github/workflows/python.yml
@@ -3,8 +3,7 @@ name: Python package CI
on: [ push, pull_request ]
jobs:
- test:
-
+ build:
runs-on: ubuntu-20.04
strategy:
matrix:
@@ -24,10 +23,23 @@ jobs:
- name: Install dependencies
run: |
python -m pip install --upgrade pip
- pip install setuptools wheel ruff
-
- - name: Run ruff
- run: ruff check nucypher_ops
+ pip install setuptools wheel
- name: Build dist
run: python setup.py sdist bdist_wheel
+
+ lint:
+ runs-on: ubuntu-20.04
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Python 3.12
+ uses: actions/setup-python@v4
+ with:
+ python-version: "3.12"
+
+ - name: Install ruff
+ run: pip install ruff
+
+ - name: Run ruff
+ run: ruff check --output-format=github nucypher_ops
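The same check can be reproduced locally before pushing. A minimal sketch, assuming ruff is installed in the active environment (e.g. pip install ruff) and the script is run from the repository root; the helper file name is hypothetical:

    # lint.py -- mirrors the CI lint step above
    import subprocess
    import sys

    result = subprocess.run(["ruff", "check", "nucypher_ops"])
    sys.exit(result.returncode)

The --output-format=github flag is omitted locally; it only changes how findings are printed so that GitHub Actions can render them as inline annotations.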
From ed3eb89fac07c6a8a7f3ffc7bc7be2380950310e Mon Sep 17 00:00:00 2001
From: Piotr Roslaniec
Date: Thu, 30 Nov 2023 21:58:18 +0100
Subject: [PATCH 4/4] chore: fix linter after rebase
---
nucypher_ops/cli/main.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/nucypher_ops/cli/main.py b/nucypher_ops/cli/main.py
index 8fdbfd2..6065e50 100644
--- a/nucypher_ops/cli/main.py
+++ b/nucypher_ops/cli/main.py
@@ -1,3 +1,5 @@
+from importlib.metadata import version
+
import click
from nucypher_ops.cli.ethereum import cli as ethereum
@@ -7,8 +9,6 @@
from nucypher_ops.cli.tbtcv2 import cli as tbtcv2
from nucypher_ops.cli.ursula import cli as ursula
-from importlib.metadata import version
-
package_version = version('nucypher_ops')