diff --git a/CHANGELOG.md b/CHANGELOG.md
index 48589c8..8e8ffef 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,7 +5,7 @@ NOTES:
BREAKING CHANGES:
ENHANCEMENTS:
-
+* Drop support for GPU workers on Spark
FEATURES:
BUG FIXES:
diff --git a/docs/data-sources/cluster.md b/docs/data-sources/cluster.md
index f986a9e..51502e2 100644
--- a/docs/data-sources/cluster.md
+++ b/docs/data-sources/cluster.md
@@ -63,32 +63,8 @@ data "hopsworksai_clusters" "cluster" {
Read-Only:
-- `gpu_workers` (List of Object) (see [below for nested schema](#nestedobjatt--autoscale--gpu_workers))
- `non_gpu_workers` (List of Object) (see [below for nested schema](#nestedobjatt--autoscale--non_gpu_workers))
-
-### Nested Schema for `autoscale.gpu_workers`
-
-Read-Only:
-
-- `disk_size` (Number)
-- `downscale_wait_time` (Number)
-- `instance_type` (String)
-- `max_workers` (Number)
-- `min_workers` (Number)
-- `spot_config` (List of Object) (see [below for nested schema](#nestedobjatt--autoscale--gpu_workers--spot_config))
-- `standby_workers` (Number)
-
-
-### Nested Schema for `autoscale.gpu_workers.spot_config`
-
-Read-Only:
-
-- `fall_back_on_demand` (Boolean)
-- `max_price_percent` (Number)
-
-
-
### Nested Schema for `autoscale.non_gpu_workers`
diff --git a/docs/data-sources/clusters.md b/docs/data-sources/clusters.md
index c316211..da014a3 100644
--- a/docs/data-sources/clusters.md
+++ b/docs/data-sources/clusters.md
@@ -87,32 +87,8 @@ Read-Only:
Read-Only:
-- `gpu_workers` (List of Object) (see [below for nested schema](#nestedobjatt--clusters--autoscale--gpu_workers))
- `non_gpu_workers` (List of Object) (see [below for nested schema](#nestedobjatt--clusters--autoscale--non_gpu_workers))
-
-### Nested Schema for `clusters.autoscale.gpu_workers`
-
-Read-Only:
-
-- `disk_size` (Number)
-- `downscale_wait_time` (Number)
-- `instance_type` (String)
-- `max_workers` (Number)
-- `min_workers` (Number)
-- `spot_config` (List of Object) (see [below for nested schema](#nestedobjatt--clusters--autoscale--gpu_workers--spot_config))
-- `standby_workers` (Number)
-
-
-### Nested Schema for `clusters.autoscale.gpu_workers.standby_workers`
-
-Read-Only:
-
-- `fall_back_on_demand` (Boolean)
-- `max_price_percent` (Number)
-
-
-
### Nested Schema for `clusters.autoscale.non_gpu_workers`
diff --git a/docs/data-sources/instance_type.md b/docs/data-sources/instance_type.md
index a043822..311acd1 100644
--- a/docs/data-sources/instance_type.md
+++ b/docs/data-sources/instance_type.md
@@ -42,7 +42,6 @@ data "hopsworksai_instance_type" "supported_type" {
### Optional
- `min_cpus` (Number) Filter based on the minimum number of CPU cores. Defaults to `0`.
-- `min_gpus` (Number) Filter based on the minimum number of GPUs. Defaults to `0`.
- `min_memory_gb` (Number) Filter based on the minimum memory in gigabytes. Defaults to `0`.
- `with_nvme` (Boolean) Filter based on the presence of NVMe drives. Defaults to `false`.
diff --git a/docs/data-sources/instance_types.md b/docs/data-sources/instance_types.md
index 474637a..24beb61 100644
--- a/docs/data-sources/instance_types.md
+++ b/docs/data-sources/instance_types.md
@@ -41,7 +41,6 @@ data "hopsworksai_instance_types" "supported_worker_types" {
Read-Only:
- `cpus` (Number)
-- `gpus` (Number)
- `id` (String)
- `memory` (Number)
- `with_nvme` (Boolean)
diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md
index a1133b6..046bebc 100644
--- a/docs/resources/cluster.md
+++ b/docs/resources/cluster.md
@@ -294,10 +294,6 @@ Required:
- `non_gpu_workers` (Block List, Min: 1, Max: 1) Setup auto scaling for non gpu nodes. (see [below for nested schema](#nestedblock--autoscale--non_gpu_workers))
-Optional:
-
-- `gpu_workers` (Block List, Max: 1) Setup auto scaling for gpu nodes. (see [below for nested schema](#nestedblock--autoscale--gpu_workers))
-
### Nested Schema for `autoscale.non_gpu_workers`
@@ -324,32 +320,6 @@ Optional:
-
-### Nested Schema for `autoscale.gpu_workers`
-
-Required:
-
-- `instance_type` (String) The instance type to use while auto scaling.
-
-Optional:
-
-- `disk_size` (Number) The disk size to use while auto scaling Defaults to `512`.
-- `downscale_wait_time` (Number) The time to wait before removing unused resources. Defaults to `300`.
-- `max_workers` (Number) The maximum number of workers created by auto scaling. Defaults to `10`.
-- `min_workers` (Number) The minimum number of workers created by auto scaling. Defaults to `0`.
-- `spot_config` (Block List, Max: 1) The configuration to use spot instances (see [below for nested schema](#nestedblock--autoscale--gpu_workers--spot_config))
-- `standby_workers` (Number) The percentage of workers to be always available during auto scaling. If you set this value to 0 new workers will only be added when a job or a notebook requests the resources. This attribute will not be taken into account if you set the minimum number of workers to 0 and no resources are used in the cluster, instead, it will start to take effect as soon as you start using resources. Defaults to `0.5`.
-
-
-### Nested Schema for `autoscale.gpu_workers.spot_config`
-
-Optional:
-
-- `fall_back_on_demand` (Boolean) Fall back to on demand instance if unable to allocate a spot instance Defaults to `true`.
-- `max_price_percent` (Number) The maximum spot instance price in percentage of the on-demand price. Defaults to `100`.
-
-
-
### Nested Schema for `aws_attributes`
diff --git a/docs/resources/cluster_from_backup.md b/docs/resources/cluster_from_backup.md
index 42ebb03..196b419 100644
--- a/docs/resources/cluster_from_backup.md
+++ b/docs/resources/cluster_from_backup.md
@@ -70,10 +70,6 @@ Required:
- `non_gpu_workers` (Block List, Min: 1, Max: 1) Setup auto scaling for non gpu nodes. (see [below for nested schema](#nestedblock--autoscale--non_gpu_workers))
-Optional:
-
-- `gpu_workers` (Block List, Max: 1) Setup auto scaling for gpu nodes. (see [below for nested schema](#nestedblock--autoscale--gpu_workers))
-
### Nested Schema for `autoscale.non_gpu_workers`
@@ -100,32 +96,6 @@ Optional:
-
-### Nested Schema for `autoscale.gpu_workers`
-
-Required:
-
-- `instance_type` (String) The instance type to use while auto scaling.
-
-Optional:
-
-- `disk_size` (Number) The disk size to use while auto scaling Defaults to `512`.
-- `downscale_wait_time` (Number) The time to wait before removing unused resources. Defaults to `300`.
-- `max_workers` (Number) The maximum number of workers created by auto scaling. Defaults to `10`.
-- `min_workers` (Number) The minimum number of workers created by auto scaling. Defaults to `0`.
-- `spot_config` (Block List, Max: 1) The configuration to use spot instances (see [below for nested schema](#nestedblock--autoscale--gpu_workers--spot_config))
-- `standby_workers` (Number) The percentage of workers to be always available during auto scaling. If you set this value to 0 new workers will only be added when a job or a notebook requests the resources. This attribute will not be taken into account if you set the minimum number of workers to 0 and no resources are used in the cluster, instead, it will start to take effect as soon as you start using resources. Defaults to `0.5`.
-
-
-### Nested Schema for `autoscale.gpu_workers.spot_config`
-
-Optional:
-
-- `fall_back_on_demand` (Boolean) Fall back to on demand instance if unable to allocate a spot instance Defaults to `true`.
-- `max_price_percent` (Number) The maximum spot instance price in percentage of the on-demand price. Defaults to `100`.
-
-
-
### Nested Schema for `aws_attributes`
diff --git a/examples/complete/aws/autoscale/README.md b/examples/complete/aws/autoscale/README.md
index 42a01e5..cd2b101 100644
--- a/examples/complete/aws/autoscale/README.md
+++ b/examples/complete/aws/autoscale/README.md
@@ -44,15 +44,15 @@ terraform apply
## Update Autoscale
-You can update the autoscale configuration after creations, by changing the `autoscale` configuration block. For example, you can configure autoscale for GPU workers as follows:
+You can update the autoscale configuration after creation, by changing the `autoscale` configuration block. For example, you can configure autoscale as follows:
> **Notice** that you need to run `terraform apply` after updating your configuration for your changes to take place.
```hcl
-data "hopsworksai_instance_type" "gpu_worker" {
+data "hopsworksai_instance_type" "small_worker" {
cloud_provider = "AWS"
node_type = "worker"
- min_gpus = 1
+ min_cpus = 8
}
resource "hopsworksai_cluster" "cluster" {
@@ -67,15 +67,6 @@ resource "hopsworksai_cluster" "cluster" {
standby_workers = 0.5
downscale_wait_time = 300
}
-
- gpu_workers {
- instance_type = data.hopsworksai_instance_type.gpu_worker.id
- disk_size = 256
- min_workers = 0
- max_workers = 5
- standby_workers = 0.5
- downscale_wait_time = 300
- }
}
}
diff --git a/examples/complete/aws/basic/README.md b/examples/complete/aws/basic/README.md
index 4606d9f..3df3a10 100644
--- a/examples/complete/aws/basic/README.md
+++ b/examples/complete/aws/basic/README.md
@@ -82,13 +82,13 @@ resource "hopsworksai_cluster" "cluster" {
}
```
-You can add a new different worker type for example another worker with at least one gpu as follows:
+You can add a new different worker type for example another worker with at least 16 cpu cores as follows:
```hcl
-data "hopsworksai_instance_type" "gpu_worker" {
+data "hopsworksai_instance_type" "my_worker" {
cloud_provider = "AWS"
node_type = "worker"
- min_gpus = 1
+ min_cpus = 16
}
resource "hopsworksai_cluster" "cluster" {
@@ -101,7 +101,7 @@ resource "hopsworksai_cluster" "cluster" {
}
workers {
- instance_type = data.hopsworksai_instance_type.gpu_worker.id
+ instance_type = data.hopsworksai_instance_type.my_worker.id
disk_size = 512
count = 1
}
diff --git a/examples/complete/azure/autoscale/README.md b/examples/complete/azure/autoscale/README.md
index d092472..d243ab6 100644
--- a/examples/complete/azure/autoscale/README.md
+++ b/examples/complete/azure/autoscale/README.md
@@ -44,15 +44,15 @@ terraform apply -var="resource_group="
## Update Autoscale
-You can update the autoscale configuration after creations, by changing the `autoscale` configuration block. For example, you can configure autoscale for GPU workers as follows:
+You can update the autoscale configuration after creation, by changing the `autoscale` configuration block. For example, you can configure your own worker as follows:
> **Notice** that you need to run `terraform apply` after updating your configuration for your changes to take place.
```hcl
-data "hopsworksai_instance_type" "gpu_worker" {
+data "hopsworksai_instance_type" "my_worker" {
cloud_provider = "AZURE"
node_type = "worker"
- min_gpus = 1
+ min_cpus = 16
}
resource "hopsworksai_cluster" "cluster" {
@@ -60,22 +60,13 @@ resource "hopsworksai_cluster" "cluster" {
autoscale {
non_gpu_workers {
- instance_type = data.hopsworksai_instance_type.small_worker.id
+ instance_type = data.hopsworksai_instance_type.my_worker.id
disk_size = 256
min_workers = 0
max_workers = 10
standby_workers = 0.5
downscale_wait_time = 300
}
-
- gpu_workers {
- instance_type = data.hopsworksai_instance_type.gpu_worker.id
- disk_size = 256
- min_workers = 0
- max_workers = 5
- standby_workers = 0.5
- downscale_wait_time = 300
- }
}
}
diff --git a/examples/complete/azure/basic/README.md b/examples/complete/azure/basic/README.md
index fd0ef06..23d74dc 100644
--- a/examples/complete/azure/basic/README.md
+++ b/examples/complete/azure/basic/README.md
@@ -82,13 +82,13 @@ resource "hopsworksai_cluster" "cluster" {
}
```
-You can add a new different worker type for example another worker with at least one gpu as follows:
+You can add a new different worker type for example another worker with at least 16 cpu cores as follows:
```hcl
-data "hopsworksai_instance_type" "gpu_worker" {
+data "hopsworksai_instance_type" "my_worker" {
cloud_provider = "AZURE"
node_type = "worker"
- min_gpus = 1
+ min_cpus = 16
}
resource "hopsworksai_cluster" "cluster" {
@@ -101,7 +101,7 @@ resource "hopsworksai_cluster" "cluster" {
}
workers {
- instance_type = data.hopsworksai_instance_type.gpu_worker.id
+ instance_type = data.hopsworksai_instance_type.my_worker.id
disk_size = 512
count = 1
}
diff --git a/hopsworksai/data_source_instance_type.go b/hopsworksai/data_source_instance_type.go
index c368122..db108b0 100644
--- a/hopsworksai/data_source_instance_type.go
+++ b/hopsworksai/data_source_instance_type.go
@@ -46,13 +46,6 @@ func dataSourceInstanceType() *schema.Resource {
Default: 0,
ValidateFunc: validation.IntAtLeast(0),
},
- "min_gpus": {
- Description: "Filter based on the minimum number of GPUs.",
- Type: schema.TypeInt,
- Optional: true,
- Default: 0,
- ValidateFunc: validation.IntAtLeast(0),
- },
"with_nvme": {
Description: "Filter based on the presence of NVMe drives.",
Type: schema.TypeBool,
@@ -85,7 +78,6 @@ func dataSourceInstanceTypeRead(ctx context.Context, d *schema.ResourceData, met
minMemory := d.Get("min_memory_gb").(float64)
minCPUs := d.Get("min_cpus").(int)
- minGPUs := d.Get("min_gpus").(int)
withNVMe := d.Get("with_nvme").(bool)
var chosenType *api.SupportedInstanceType = nil
@@ -96,9 +88,6 @@ func dataSourceInstanceTypeRead(ctx context.Context, d *schema.ResourceData, met
if minCPUs > 0 && v.CPUs < minCPUs {
continue
}
- if minGPUs > 0 && v.GPUs < minGPUs {
- continue
- }
if withNVMe != v.WithNVMe {
continue
}
diff --git a/hopsworksai/data_source_instance_type_test.go b/hopsworksai/data_source_instance_type_test.go
index 20b93f5..a808a40 100644
--- a/hopsworksai/data_source_instance_type_test.go
+++ b/hopsworksai/data_source_instance_type_test.go
@@ -24,38 +24,36 @@ func TestInstanceTypeDataSourceRead(t *testing.T) {
func TestInstanceTypeDataSourceRead_filtered(t *testing.T) {
for _, c := range []api.CloudProvider{api.AWS, api.AZURE} {
- testInstanceTypeDataSourceBase(t, c, api.HeadNode, 20, 0, 0, false, "head-type-1")
- testInstanceTypeDataSourceBase(t, c, api.HeadNode, 21, 0, 0, false, "head-type-2")
- testInstanceTypeDataSourceBase(t, c, api.HeadNode, 0, 10, 0, false, "head-type-1")
- testInstanceTypeDataSourceBase(t, c, api.HeadNode, 0, 11, 0, false, "head-type-2")
- testInstanceTypeDataSourceBase(t, c, api.HeadNode, 0, 0, 1, false, "head-type-2")
+ testInstanceTypeDataSourceBase(t, c, api.HeadNode, 20, 0, false, "head-type-1")
+ testInstanceTypeDataSourceBase(t, c, api.HeadNode, 21, 0, false, "head-type-2")
+ testInstanceTypeDataSourceBase(t, c, api.HeadNode, 0, 10, false, "head-type-1")
+ testInstanceTypeDataSourceBase(t, c, api.HeadNode, 0, 11, false, "head-type-2")
- testInstanceTypeDataSourceBase(t, c, api.WorkerNode, 20, 0, 0, false, "worker-type-1")
- testInstanceTypeDataSourceBase(t, c, api.WorkerNode, 21, 0, 0, false, "worker-type-2")
- testInstanceTypeDataSourceBase(t, c, api.WorkerNode, 0, 10, 0, false, "worker-type-1")
- testInstanceTypeDataSourceBase(t, c, api.WorkerNode, 0, 11, 0, false, "worker-type-2")
- testInstanceTypeDataSourceBase(t, c, api.WorkerNode, 0, 0, 1, false, "worker-type-2")
- testInstanceTypeDataSourceBase(t, c, api.WorkerNode, 0, 0, 0, true, "worker-type-3")
+ testInstanceTypeDataSourceBase(t, c, api.WorkerNode, 20, 0, false, "worker-type-1")
+ testInstanceTypeDataSourceBase(t, c, api.WorkerNode, 21, 0, false, "worker-type-2")
+ testInstanceTypeDataSourceBase(t, c, api.WorkerNode, 0, 10, false, "worker-type-1")
+ testInstanceTypeDataSourceBase(t, c, api.WorkerNode, 0, 11, false, "worker-type-2")
+ testInstanceTypeDataSourceBase(t, c, api.WorkerNode, 0, 0, true, "worker-type-3")
- testInstanceTypeDataSourceBase(t, c, api.RonDBManagementNode, 20, 0, 0, false, "mgm-type-2")
- testInstanceTypeDataSourceBase(t, c, api.RonDBManagementNode, 21, 0, 0, false, "mgm-type-1")
- testInstanceTypeDataSourceBase(t, c, api.RonDBManagementNode, 0, 2, 0, false, "mgm-type-2")
- testInstanceTypeDataSourceBase(t, c, api.RonDBManagementNode, 0, 3, 0, false, "mgm-type-1")
+ testInstanceTypeDataSourceBase(t, c, api.RonDBManagementNode, 20, 0, false, "mgm-type-2")
+ testInstanceTypeDataSourceBase(t, c, api.RonDBManagementNode, 21, 0, false, "mgm-type-1")
+ testInstanceTypeDataSourceBase(t, c, api.RonDBManagementNode, 0, 2, false, "mgm-type-2")
+ testInstanceTypeDataSourceBase(t, c, api.RonDBManagementNode, 0, 3, false, "mgm-type-1")
- testInstanceTypeDataSourceBase(t, c, api.RonDBDataNode, 50, 0, 0, false, "ndbd-type-2")
- testInstanceTypeDataSourceBase(t, c, api.RonDBDataNode, 51, 0, 0, false, "ndbd-type-1")
- testInstanceTypeDataSourceBase(t, c, api.RonDBDataNode, 0, 8, 0, false, "ndbd-type-2")
- testInstanceTypeDataSourceBase(t, c, api.RonDBDataNode, 0, 9, 0, false, "ndbd-type-1")
+ testInstanceTypeDataSourceBase(t, c, api.RonDBDataNode, 50, 0, false, "ndbd-type-2")
+ testInstanceTypeDataSourceBase(t, c, api.RonDBDataNode, 51, 0, false, "ndbd-type-1")
+ testInstanceTypeDataSourceBase(t, c, api.RonDBDataNode, 0, 8, false, "ndbd-type-2")
+ testInstanceTypeDataSourceBase(t, c, api.RonDBDataNode, 0, 9, false, "ndbd-type-1")
- testInstanceTypeDataSourceBase(t, c, api.RonDBMySQLNode, 50, 0, 0, false, "mysql-type-2")
- testInstanceTypeDataSourceBase(t, c, api.RonDBMySQLNode, 51, 0, 0, false, "mysql-type-1")
- testInstanceTypeDataSourceBase(t, c, api.RonDBMySQLNode, 0, 8, 0, false, "mysql-type-2")
- testInstanceTypeDataSourceBase(t, c, api.RonDBMySQLNode, 0, 9, 0, false, "mysql-type-1")
+ testInstanceTypeDataSourceBase(t, c, api.RonDBMySQLNode, 50, 0, false, "mysql-type-2")
+ testInstanceTypeDataSourceBase(t, c, api.RonDBMySQLNode, 51, 0, false, "mysql-type-1")
+ testInstanceTypeDataSourceBase(t, c, api.RonDBMySQLNode, 0, 8, false, "mysql-type-2")
+ testInstanceTypeDataSourceBase(t, c, api.RonDBMySQLNode, 0, 9, false, "mysql-type-1")
- testInstanceTypeDataSourceBase(t, c, api.RonDBAPINode, 50, 0, 0, false, "api-type-2")
- testInstanceTypeDataSourceBase(t, c, api.RonDBAPINode, 51, 0, 0, false, "api-type-1")
- testInstanceTypeDataSourceBase(t, c, api.RonDBAPINode, 0, 8, 0, false, "api-type-2")
- testInstanceTypeDataSourceBase(t, c, api.RonDBAPINode, 0, 9, 0, false, "api-type-1")
+ testInstanceTypeDataSourceBase(t, c, api.RonDBAPINode, 50, 0, false, "api-type-2")
+ testInstanceTypeDataSourceBase(t, c, api.RonDBAPINode, 51, 0, false, "api-type-1")
+ testInstanceTypeDataSourceBase(t, c, api.RonDBAPINode, 0, 8, false, "api-type-2")
+ testInstanceTypeDataSourceBase(t, c, api.RonDBAPINode, 0, 9, false, "api-type-1")
}
}
@@ -87,10 +85,10 @@ func TestInstanceTypeDataSourceRead_error(t *testing.T) {
}
func testInstanceTypeDataSource(t *testing.T, cloud api.CloudProvider, nodeType api.NodeType, expectedId string) {
- testInstanceTypeDataSourceBase(t, cloud, nodeType, 0, 0, 0, false, expectedId)
+ testInstanceTypeDataSourceBase(t, cloud, nodeType, 0, 0, false, expectedId)
}
-func testInstanceTypeDataSourceBase(t *testing.T, cloud api.CloudProvider, nodeType api.NodeType, minMemory float64, minCPU int, minGPU int, withNvme bool, expectedId string) {
+func testInstanceTypeDataSourceBase(t *testing.T, cloud api.CloudProvider, nodeType api.NodeType, minMemory float64, minCPU int, withNvme bool, expectedId string) {
r := test.ResourceFixture{
HttpOps: []test.Operation{
{
@@ -106,34 +104,29 @@ func testInstanceTypeDataSourceBase(t *testing.T, cloud api.CloudProvider, nodeT
{
"id": "head-type-1",
"memory": 20,
- "cpus": 10,
- "gpus": 0
+ "cpus": 10
},
{
"id": "head-type-2",
"memory": 50,
- "cpus": 20,
- "gpus": 1
+ "cpus": 20
}
],
"worker": [
{
"id": "worker-type-1",
"memory": 20,
- "cpus": 10,
- "gpus": 0
+ "cpus": 10
},
{
"id": "worker-type-2",
"memory": 50,
- "cpus": 20,
- "gpus": 1
+ "cpus": 20
},
{
"id": "worker-type-3",
"memory": 50,
"cpus": 20,
- "gpus": 1,
"withNvme": true
}
],
@@ -142,56 +135,48 @@ func testInstanceTypeDataSourceBase(t *testing.T, cloud api.CloudProvider, nodeT
{
"id": "mgm-type-1",
"memory": 30,
- "cpus": 16,
- "gpus": 0
+ "cpus": 16
},
{
"id": "mgm-type-2",
"memory": 20,
- "cpus": 2,
- "gpus": 0
+ "cpus": 2
}
],
"ndbd": [
{
"id": "ndbd-type-1",
"memory": 100,
- "cpus": 16,
- "gpus": 0
+ "cpus": 16
},
{
"id": "ndbd-type-2",
"memory": 50,
- "cpus": 8,
- "gpus": 0
+ "cpus": 8
}
],
"mysqld": [
{
"id": "mysql-type-1",
"memory": 100,
- "cpus": 16,
- "gpus": 0
+ "cpus": 16
},
{
"id": "mysql-type-2",
"memory": 50,
- "cpus": 8,
- "gpus": 0
+ "cpus": 8
}
],
"api": [
{
"id": "api-type-1",
"memory": 100,
- "cpus": 16,
- "gpus": 0
+ "cpus": 16
},
{
"id": "api-type-2",
"memory": 50,
- "cpus": 8,
- "gpus": 0
+ "cpus": 8
}
]
}
@@ -207,7 +192,6 @@ func testInstanceTypeDataSourceBase(t *testing.T, cloud api.CloudProvider, nodeT
"cloud_provider": cloud.String(),
"min_memory_gb": minMemory,
"min_cpus": minCPU,
- "min_gpus": minGPU,
"with_nvme": withNvme,
},
ExpectId: expectedId,
diff --git a/hopsworksai/data_source_instance_types.go b/hopsworksai/data_source_instance_types.go
index 60633ca..5e3e236 100644
--- a/hopsworksai/data_source_instance_types.go
+++ b/hopsworksai/data_source_instance_types.go
@@ -53,11 +53,6 @@ func dataSourceInstanceTypes() *schema.Resource {
Type: schema.TypeInt,
Computed: true,
},
- "gpus": {
- Description: "The instance type number of GPUs.",
- Type: schema.TypeInt,
- Computed: true,
- },
"with_nvme": {
Description: "The instance type is equipped of NVMe drives.",
Type: schema.TypeBool,
diff --git a/hopsworksai/data_source_instance_types_test.go b/hopsworksai/data_source_instance_types_test.go
index f72a80e..bee9b8c 100644
--- a/hopsworksai/data_source_instance_types_test.go
+++ b/hopsworksai/data_source_instance_types_test.go
@@ -127,14 +127,12 @@ func TestInstanceTypesDataSourceRead(t *testing.T) {
"id": "head-type-1",
"memory": 20.0,
"cpus": 10,
- "gpus": 0,
"with_nvme": false,
},
map[string]interface{}{
"id": "head-type-2",
"memory": 50.0,
"cpus": 20,
- "gpus": 1,
"with_nvme": false,
},
})
@@ -144,14 +142,12 @@ func TestInstanceTypesDataSourceRead(t *testing.T) {
"id": "worker-type-1",
"memory": 20.0,
"cpus": 10,
- "gpus": 0,
"with_nvme": false,
},
map[string]interface{}{
"id": "worker-type-2",
"memory": 50.0,
"cpus": 20,
- "gpus": 1,
"with_nvme": false,
},
})
@@ -161,14 +157,12 @@ func TestInstanceTypesDataSourceRead(t *testing.T) {
"id": "mgm-type-2",
"memory": 20.0,
"cpus": 2,
- "gpus": 0,
"with_nvme": false,
},
map[string]interface{}{
"id": "mgm-type-1",
"memory": 30.0,
"cpus": 16,
- "gpus": 0,
"with_nvme": false,
},
})
@@ -178,14 +172,12 @@ func TestInstanceTypesDataSourceRead(t *testing.T) {
"id": "ndbd-type-2",
"memory": 50.0,
"cpus": 8,
- "gpus": 0,
"with_nvme": false,
},
map[string]interface{}{
"id": "ndbd-type-1",
"memory": 100.0,
"cpus": 16,
- "gpus": 0,
"with_nvme": false,
},
})
@@ -195,14 +187,12 @@ func TestInstanceTypesDataSourceRead(t *testing.T) {
"id": "mysql-type-2",
"memory": 50.0,
"cpus": 8,
- "gpus": 0,
"with_nvme": false,
},
map[string]interface{}{
"id": "mysql-type-1",
"memory": 100.0,
"cpus": 16,
- "gpus": 0,
"with_nvme": false,
},
})
@@ -212,14 +202,12 @@ func TestInstanceTypesDataSourceRead(t *testing.T) {
"id": "api-type-2",
"memory": 50.0,
"cpus": 8,
- "gpus": 0,
"with_nvme": false,
},
map[string]interface{}{
"id": "api-type-1",
"memory": 100.0,
"cpus": 16,
- "gpus": 0,
"with_nvme": false,
},
})
@@ -242,28 +230,24 @@ func testInstanceTypesDataSource(t *testing.T, cloud api.CloudProvider, nodeType
{
"id": "head-type-1",
"memory": 20,
- "cpus": 10,
- "gpus": 0
+ "cpus": 10
},
{
"id": "head-type-2",
"memory": 50,
- "cpus": 20,
- "gpus": 1
+ "cpus": 20
}
],
"worker": [
{
"id": "worker-type-1",
"memory": 20,
- "cpus": 10,
- "gpus": 0
+ "cpus": 10
},
{
"id": "worker-type-2",
"memory": 50,
- "cpus": 20,
- "gpus": 1
+ "cpus": 20
}
],
"ronDB": {
@@ -271,56 +255,48 @@ func testInstanceTypesDataSource(t *testing.T, cloud api.CloudProvider, nodeType
{
"id": "mgm-type-1",
"memory": 30,
- "cpus": 16,
- "gpus": 0
+ "cpus": 16
},
{
"id": "mgm-type-2",
"memory": 20,
- "cpus": 2,
- "gpus": 0
+ "cpus": 2
}
],
"ndbd": [
{
"id": "ndbd-type-1",
"memory": 100,
- "cpus": 16,
- "gpus": 0
+ "cpus": 16
},
{
"id": "ndbd-type-2",
"memory": 50,
- "cpus": 8,
- "gpus": 0
+ "cpus": 8
}
],
"mysqld": [
{
"id": "mysql-type-1",
"memory": 100,
- "cpus": 16,
- "gpus": 0
+ "cpus": 16
},
{
"id": "mysql-type-2",
"memory": 50,
- "cpus": 8,
- "gpus": 0
+ "cpus": 8
}
],
"api": [
{
"id": "api-type-1",
"memory": 100,
- "cpus": 16,
- "gpus": 0
+ "cpus": 16
},
{
"id": "api-type-2",
"memory": 50,
- "cpus": 8,
- "gpus": 0
+ "cpus": 8
}
]
}
diff --git a/hopsworksai/internal/api/apis_test.go b/hopsworksai/internal/api/apis_test.go
index 4c88dc3..8d71eca 100644
--- a/hopsworksai/internal/api/apis_test.go
+++ b/hopsworksai/internal/api/apis_test.go
@@ -1184,28 +1184,24 @@ func testGetSupportedInstanceTypes(t *testing.T, cloud CloudProvider, region str
{
"id": "head-type-1",
"memory": 20,
- "cpus": 10,
- "gpus": 0
+ "cpus": 10
},
{
"id": "head-type-2",
"memory": 50,
- "cpus": 20,
- "gpus": 1
+ "cpus": 20
}
],
"worker": [
{
"id": "worker-type-1",
"memory": 20,
- "cpus": 10,
- "gpus": 0
+ "cpus": 10
},
{
"id": "worker-type-2",
"memory": 50,
- "cpus": 20,
- "gpus": 1
+ "cpus": 20
}
],
"ronDB": {
@@ -1213,32 +1209,28 @@ func testGetSupportedInstanceTypes(t *testing.T, cloud CloudProvider, region str
{
"id": "mgm-type-1",
"memory": 30,
- "cpus": 2,
- "gpus": 0
+ "cpus": 2
}
],
"ndbd": [
{
"id": "ndbd-type-1",
"memory": 100,
- "cpus": 16,
- "gpus": 0
+ "cpus": 16
}
],
"mysqld": [
{
"id": "mysql-type-1",
"memory": 100,
- "cpus": 16,
- "gpus": 0
+ "cpus": 16
}
],
"api": [
{
"id": "api-type-1",
"memory": 100,
- "cpus": 16,
- "gpus": 0
+ "cpus": 16
}
]
}
@@ -1260,13 +1252,11 @@ func testGetSupportedInstanceTypes(t *testing.T, cloud CloudProvider, region str
Id: "head-type-1",
Memory: 20,
CPUs: 10,
- GPUs: 0,
},
{
Id: "head-type-2",
Memory: 50,
CPUs: 20,
- GPUs: 1,
},
},
Worker: SupportedInstanceTypeList{
@@ -1274,13 +1264,11 @@ func testGetSupportedInstanceTypes(t *testing.T, cloud CloudProvider, region str
Id: "worker-type-1",
Memory: 20,
CPUs: 10,
- GPUs: 0,
},
{
Id: "worker-type-2",
Memory: 50,
CPUs: 20,
- GPUs: 1,
},
},
RonDB: SupportedRonDBInstanceTypes{
@@ -1289,7 +1277,6 @@ func testGetSupportedInstanceTypes(t *testing.T, cloud CloudProvider, region str
Id: "mgm-type-1",
Memory: 30,
CPUs: 2,
- GPUs: 0,
},
},
DataNode: SupportedInstanceTypeList{
@@ -1297,7 +1284,6 @@ func testGetSupportedInstanceTypes(t *testing.T, cloud CloudProvider, region str
Id: "ndbd-type-1",
Memory: 100,
CPUs: 16,
- GPUs: 0,
},
},
MySQLNode: SupportedInstanceTypeList{
@@ -1305,7 +1291,6 @@ func testGetSupportedInstanceTypes(t *testing.T, cloud CloudProvider, region str
Id: "mysql-type-1",
Memory: 100,
CPUs: 16,
- GPUs: 0,
},
},
APINode: SupportedInstanceTypeList{
@@ -1313,7 +1298,6 @@ func testGetSupportedInstanceTypes(t *testing.T, cloud CloudProvider, region str
Id: "api-type-1",
Memory: 100,
CPUs: 16,
- GPUs: 0,
},
},
},
@@ -1379,49 +1363,6 @@ func testConfigureAutoscale(t *testing.T, reqBody string, config *AutoscaleConfi
}
func TestConfigureAutoscale(t *testing.T) {
- testConfigureAutoscale(t, `
- {
- "autoscale":
- {
- "nonGpu":
- {
- "instanceType": "non-gpu-node",
- "diskSize": 256,
- "minWorkers": 0,
- "maxWorkers": 10,
- "standbyWorkers": 0.5,
- "downscaleWaitTime": 300
- },
- "gpu":
- {
- "instanceType": "gpu-node",
- "diskSize": 512,
- "minWorkers": 1,
- "maxWorkers": 5,
- "standbyWorkers": 0.4,
- "downscaleWaitTime": 200
- }
- }
- }`,
- &AutoscaleConfiguration{
- NonGPU: &AutoscaleConfigurationBase{
- InstanceType: "non-gpu-node",
- DiskSize: 256,
- MinWorkers: 0,
- MaxWorkers: 10,
- StandbyWorkers: 0.5,
- DownscaleWaitTime: 300,
- },
- GPU: &AutoscaleConfigurationBase{
- InstanceType: "gpu-node",
- DiskSize: 512,
- MinWorkers: 1,
- MaxWorkers: 5,
- StandbyWorkers: 0.4,
- DownscaleWaitTime: 200,
- },
- })
-
testConfigureAutoscale(t, `
{
"autoscale":{
@@ -1446,31 +1387,6 @@ func TestConfigureAutoscale(t *testing.T) {
DownscaleWaitTime: 300,
},
})
-
- testConfigureAutoscale(t, `
- {
- "autoscale":{
- "gpu":{
- "instanceType": "gpu-node",
- "diskSize": 512,
- "minWorkers": 1,
- "maxWorkers": 5,
- "standbyWorkers": 0.4,
- "downscaleWaitTime": 200
- }
- }
- }
- `,
- &AutoscaleConfiguration{
- GPU: &AutoscaleConfigurationBase{
- InstanceType: "gpu-node",
- DiskSize: 512,
- MinWorkers: 1,
- MaxWorkers: 5,
- StandbyWorkers: 0.4,
- DownscaleWaitTime: 200,
- },
- })
}
func TestDisableAutoscale(t *testing.T) {
diff --git a/hopsworksai/internal/api/model.go b/hopsworksai/internal/api/model.go
index f47874b..1b5f3a7 100644
--- a/hopsworksai/internal/api/model.go
+++ b/hopsworksai/internal/api/model.go
@@ -153,7 +153,6 @@ type AutoscaleConfigurationBase struct {
type AutoscaleConfiguration struct {
NonGPU *AutoscaleConfigurationBase `json:"nonGpu,omitempty"`
- GPU *AutoscaleConfigurationBase `json:"gpu,omitempty"`
}
type UpgradeInProgress struct {
@@ -362,7 +361,6 @@ type SupportedInstanceType struct {
Id string `json:"id"`
CPUs int `json:"cpus"`
Memory float64 `json:"memory"`
- GPUs int `json:"gpus"`
WithNVMe bool `json:"withNVMe"`
}
@@ -370,10 +368,6 @@ type SupportedInstanceTypeList []SupportedInstanceType
func (l SupportedInstanceTypeList) Sort() {
sort.SliceStable(l, func(i, j int) bool {
- if l[i].GPUs != l[j].GPUs {
- return l[i].GPUs < l[j].GPUs
- }
-
if l[i].CPUs != l[j].CPUs {
return l[i].CPUs < l[j].CPUs
}
diff --git a/hopsworksai/internal/api/model_test.go b/hopsworksai/internal/api/model_test.go
index 559cc6e..925b704 100644
--- a/hopsworksai/internal/api/model_test.go
+++ b/hopsworksai/internal/api/model_test.go
@@ -68,13 +68,6 @@ func TestGetSupportedNodesByNodeType(t *testing.T) {
Id: "head-type-1",
CPUs: 10,
Memory: 30,
- GPUs: 0,
- },
- {
- Id: "head-type-2",
- CPUs: 10,
- Memory: 30,
- GPUs: 1,
},
},
Worker: SupportedInstanceTypeList{
@@ -82,13 +75,6 @@ func TestGetSupportedNodesByNodeType(t *testing.T) {
Id: "worker-type-1",
CPUs: 16,
Memory: 100,
- GPUs: 0,
- },
- {
- Id: "worker-type-2",
- CPUs: 32,
- Memory: 100,
- GPUs: 1,
},
},
RonDB: SupportedRonDBInstanceTypes{
@@ -97,7 +83,6 @@ func TestGetSupportedNodesByNodeType(t *testing.T) {
Id: "rondb-mgm-type-1",
CPUs: 2,
Memory: 30,
- GPUs: 0,
},
},
DataNode: SupportedInstanceTypeList{
@@ -105,13 +90,11 @@ func TestGetSupportedNodesByNodeType(t *testing.T) {
Id: "rondb-data-type-1",
CPUs: 16,
Memory: 100,
- GPUs: 0,
},
{
Id: "rondb-data-type-2",
CPUs: 32,
Memory: 200,
- GPUs: 0,
},
},
MySQLNode: SupportedInstanceTypeList{
@@ -119,13 +102,11 @@ func TestGetSupportedNodesByNodeType(t *testing.T) {
Id: "rondb-mysql-type-1",
CPUs: 8,
Memory: 100,
- GPUs: 0,
},
{
Id: "rondb-mysql-type-2",
CPUs: 16,
Memory: 100,
- GPUs: 0,
},
},
APINode: SupportedInstanceTypeList{
@@ -133,13 +114,11 @@ func TestGetSupportedNodesByNodeType(t *testing.T) {
Id: "rondb-api-type-1",
CPUs: 8,
Memory: 100,
- GPUs: 0,
},
{
Id: "rondb-api-type-2",
CPUs: 16,
Memory: 100,
- GPUs: 0,
},
},
},
@@ -189,71 +168,35 @@ func TestGetSupportedNodesByNodeType(t *testing.T) {
func TestSortSupportedNodeTypes(t *testing.T) {
input := SupportedInstanceTypeList{
- {
- Id: "node-type-1",
- CPUs: 16,
- Memory: 32,
- GPUs: 1,
- },
{
Id: "node-type-2",
CPUs: 32,
Memory: 64,
- GPUs: 0,
- },
- {
- Id: "node-type-3",
- CPUs: 8,
- Memory: 16,
- GPUs: 1,
},
{
Id: "node-type-4",
CPUs: 4,
Memory: 16,
- GPUs: 0,
},
{
Id: "node-type-5",
CPUs: 2,
Memory: 8,
- GPUs: 0,
},
{
Id: "node-type-6",
CPUs: 16,
Memory: 32,
- GPUs: 0,
- },
- {
- Id: "node-type-7",
- CPUs: 32,
- Memory: 64,
- GPUs: 2,
},
{
Id: "node-type-8",
CPUs: 8,
Memory: 16,
- GPUs: 0,
- },
- {
- Id: "node-type-9",
- CPUs: 4,
- Memory: 16,
- GPUs: 1,
- },
- {
- Id: "node-type-10",
- CPUs: 2,
- Memory: 8,
- GPUs: 2,
},
{
Id: "node-type-11",
CPUs: 2,
Memory: 16,
- GPUs: 0,
},
}
@@ -262,67 +205,31 @@ func TestSortSupportedNodeTypes(t *testing.T) {
Id: "node-type-5",
CPUs: 2,
Memory: 8,
- GPUs: 0,
},
{
Id: "node-type-11",
CPUs: 2,
Memory: 16,
- GPUs: 0,
},
{
Id: "node-type-4",
CPUs: 4,
Memory: 16,
- GPUs: 0,
},
{
Id: "node-type-8",
CPUs: 8,
Memory: 16,
- GPUs: 0,
},
{
Id: "node-type-6",
CPUs: 16,
Memory: 32,
- GPUs: 0,
},
{
Id: "node-type-2",
CPUs: 32,
Memory: 64,
- GPUs: 0,
- },
- {
- Id: "node-type-9",
- CPUs: 4,
- Memory: 16,
- GPUs: 1,
- },
- {
- Id: "node-type-3",
- CPUs: 8,
- Memory: 16,
- GPUs: 1,
- },
- {
- Id: "node-type-1",
- CPUs: 16,
- Memory: 32,
- GPUs: 1,
- },
- {
- Id: "node-type-10",
- CPUs: 2,
- Memory: 8,
- GPUs: 2,
- },
- {
- Id: "node-type-7",
- CPUs: 32,
- Memory: 64,
- GPUs: 2,
},
}
diff --git a/hopsworksai/internal/structure/cluster.go b/hopsworksai/internal/structure/cluster.go
index d56fff6..da0cfd6 100644
--- a/hopsworksai/internal/structure/cluster.go
+++ b/hopsworksai/internal/structure/cluster.go
@@ -322,18 +322,13 @@ func flattenAutoscaleConfiguration(autoscale *api.AutoscaleConfiguration) []map[
}
var nonGPUNodes []interface{} = make([]interface{}, 0)
- var gpuNodes []interface{} = make([]interface{}, 0)
if autoscale.NonGPU != nil {
nonGPUNodes = append(nonGPUNodes, flattenAutoscaleConfigurationBase(autoscale.NonGPU))
}
- if autoscale.GPU != nil {
- gpuNodes = append(gpuNodes, flattenAutoscaleConfigurationBase(autoscale.GPU))
- }
return []map[string]interface{}{
{
"non_gpu_workers": nonGPUNodes,
- "gpu_workers": gpuNodes,
},
}
}
@@ -364,11 +359,6 @@ func ExpandAutoscaleConfiguration(autoscaleConfig []interface{}) *api.AutoscaleC
config := v.([]interface{})[0].(map[string]interface{})
autoscale.NonGPU = ExpandAutoscaleConfigurationBase(config)
}
-
- if v, ok := autoscaleConfigMap["gpu_workers"]; ok && len(v.([]interface{})) > 0 {
- config := v.([]interface{})[0].(map[string]interface{})
- autoscale.GPU = ExpandAutoscaleConfigurationBase(config)
- }
}
return autoscale
}
diff --git a/hopsworksai/internal/structure/cluster_test.go b/hopsworksai/internal/structure/cluster_test.go
index 0a5b99a..4901e56 100644
--- a/hopsworksai/internal/structure/cluster_test.go
+++ b/hopsworksai/internal/structure/cluster_test.go
@@ -393,14 +393,6 @@ func TestFlattenCluster(t *testing.T) {
StandbyWorkers: 0.5,
DownscaleWaitTime: 300,
},
- GPU: &api.AutoscaleConfigurationBase{
- InstanceType: "auto-gpu-node-1",
- DiskSize: 512,
- MinWorkers: 1,
- MaxWorkers: 5,
- StandbyWorkers: 0.4,
- DownscaleWaitTime: 200,
- },
},
InitScript: "#!/usr/bin/env bash\nset -e\necho 'Hello World'",
RunInitScriptFirst: true,
@@ -943,18 +935,6 @@ func TestFlattenAutoscaleConfiguration(t *testing.T) {
FallBackOnDemand: true,
},
},
- GPU: &api.AutoscaleConfigurationBase{
- InstanceType: "gpu-node",
- DiskSize: 512,
- MinWorkers: 1,
- MaxWorkers: 10,
- StandbyWorkers: 0.4,
- DownscaleWaitTime: 200,
- SpotInfo: &api.SpotConfiguration{
- MaxPrice: 2,
- FallBackOnDemand: true,
- },
- },
},
expected: []map[string]interface{}{
{
@@ -974,22 +954,6 @@ func TestFlattenAutoscaleConfiguration(t *testing.T) {
},
},
},
- "gpu_workers": []interface{}{
- map[string]interface{}{
- "instance_type": "gpu-node",
- "disk_size": 512,
- "min_workers": 1,
- "max_workers": 10,
- "standby_workers": 0.4,
- "downscale_wait_time": 200,
- "spot_config": []interface{}{
- map[string]interface{}{
- "max_price_percent": 2,
- "fall_back_on_demand": true,
- },
- },
- },
- },
},
},
},
@@ -1016,34 +980,6 @@ func TestFlattenAutoscaleConfiguration(t *testing.T) {
"downscale_wait_time": 300,
},
},
- "gpu_workers": []interface{}{},
- },
- },
- },
- {
- input: &api.AutoscaleConfiguration{
- GPU: &api.AutoscaleConfigurationBase{
- InstanceType: "gpu-node",
- DiskSize: 512,
- MinWorkers: 1,
- MaxWorkers: 10,
- StandbyWorkers: 0.4,
- DownscaleWaitTime: 200,
- },
- },
- expected: []map[string]interface{}{
- {
- "non_gpu_workers": []interface{}{},
- "gpu_workers": []interface{}{
- map[string]interface{}{
- "instance_type": "gpu-node",
- "disk_size": 512,
- "min_workers": 1,
- "max_workers": 10,
- "standby_workers": 0.4,
- "downscale_wait_time": 200,
- },
- },
},
},
},
@@ -1080,18 +1016,6 @@ func TestExpandAutoscaleConfiguration(t *testing.T) {
FallBackOnDemand: true,
},
},
- GPU: &api.AutoscaleConfigurationBase{
- InstanceType: "gpu-node",
- DiskSize: 512,
- MinWorkers: 1,
- MaxWorkers: 10,
- StandbyWorkers: 0.4,
- DownscaleWaitTime: 200,
- SpotInfo: &api.SpotConfiguration{
- MaxPrice: 2,
- FallBackOnDemand: true,
- },
- },
},
input: []interface{}{
map[string]interface{}{
@@ -1111,22 +1035,6 @@ func TestExpandAutoscaleConfiguration(t *testing.T) {
},
},
},
- "gpu_workers": []interface{}{
- map[string]interface{}{
- "instance_type": "gpu-node",
- "disk_size": 512,
- "min_workers": 1,
- "max_workers": 10,
- "standby_workers": 0.4,
- "downscale_wait_time": 200,
- "spot_config": []interface{}{
- map[string]interface{}{
- "max_price_percent": 2,
- "fall_back_on_demand": true,
- },
- },
- },
- },
},
},
},
@@ -1153,34 +1061,6 @@ func TestExpandAutoscaleConfiguration(t *testing.T) {
"downscale_wait_time": 300,
},
},
- "gpu_workers": []interface{}{},
- },
- },
- },
- {
- expected: &api.AutoscaleConfiguration{
- GPU: &api.AutoscaleConfigurationBase{
- InstanceType: "gpu-node",
- DiskSize: 512,
- MinWorkers: 1,
- MaxWorkers: 10,
- StandbyWorkers: 0.4,
- DownscaleWaitTime: 200,
- },
- },
- input: []interface{}{
- map[string]interface{}{
- "non_gpu_workers": []interface{}{},
- "gpu_workers": []interface{}{
- map[string]interface{}{
- "instance_type": "gpu-node",
- "disk_size": 512,
- "min_workers": 1,
- "max_workers": 10,
- "standby_workers": 0.4,
- "downscale_wait_time": 200,
- },
- },
},
},
},
@@ -1335,14 +1215,6 @@ func TestFlattenClusters(t *testing.T) {
StandbyWorkers: 0.5,
DownscaleWaitTime: 300,
},
- GPU: &api.AutoscaleConfigurationBase{
- InstanceType: "auto-gpu-node-1",
- DiskSize: 512,
- MinWorkers: 1,
- MaxWorkers: 5,
- StandbyWorkers: 0.4,
- DownscaleWaitTime: 200,
- },
},
InitScript: "#!/usr/bin/env bash\nset -e\necho 'Hello World'",
AWS: api.AWSCluster{
@@ -1456,14 +1328,6 @@ func TestFlattenClusters(t *testing.T) {
StandbyWorkers: 0.5,
DownscaleWaitTime: 300,
},
- GPU: &api.AutoscaleConfigurationBase{
- InstanceType: "auto-gpu-node-1",
- DiskSize: 512,
- MinWorkers: 1,
- MaxWorkers: 5,
- StandbyWorkers: 0.4,
- DownscaleWaitTime: 200,
- },
},
InitScript: "#!/usr/bin/env bash\nset -e\necho 'Hello World 2'",
Azure: api.AzureCluster{
diff --git a/hopsworksai/internal/structure/supported_instance_types.go b/hopsworksai/internal/structure/supported_instance_types.go
index d3dc968..65b5aae 100644
--- a/hopsworksai/internal/structure/supported_instance_types.go
+++ b/hopsworksai/internal/structure/supported_instance_types.go
@@ -17,7 +17,6 @@ func flattenSupportedInstanceType(instanceType *api.SupportedInstanceType) map[s
"id": instanceType.Id,
"memory": instanceType.Memory,
"cpus": instanceType.CPUs,
- "gpus": instanceType.GPUs,
"with_nvme": instanceType.WithNVMe,
}
}
diff --git a/hopsworksai/internal/structure/supported_instance_types_test.go b/hopsworksai/internal/structure/supported_instance_types_test.go
index 21f4626..e8cbeba 100644
--- a/hopsworksai/internal/structure/supported_instance_types_test.go
+++ b/hopsworksai/internal/structure/supported_instance_types_test.go
@@ -12,7 +12,6 @@ func TestFlattenSupportedInstanceType(t *testing.T) {
Id: "node-type",
CPUs: 10,
Memory: 30,
- GPUs: 1,
WithNVMe: false,
}
@@ -20,7 +19,6 @@ func TestFlattenSupportedInstanceType(t *testing.T) {
"id": "node-type",
"cpus": 10,
"memory": 30.0,
- "gpus": 1,
"with_nvme": false,
}
@@ -36,14 +34,12 @@ func TestFlattenSupportedInstanceTypes(t *testing.T) {
Id: "node-type-1",
CPUs: 10,
Memory: 30,
- GPUs: 1,
WithNVMe: false,
},
{
Id: "node-type-2",
CPUs: 5,
Memory: 20,
- GPUs: 0,
WithNVMe: false,
},
}
@@ -53,14 +49,12 @@ func TestFlattenSupportedInstanceTypes(t *testing.T) {
"id": "node-type-1",
"cpus": 10,
"memory": 30.0,
- "gpus": 1,
"with_nvme": false,
},
{
"id": "node-type-2",
"cpus": 5,
"memory": 20.0,
- "gpus": 0,
"with_nvme": false,
},
}
diff --git a/hopsworksai/resource_cluster.go b/hopsworksai/resource_cluster.go
index ad7b032..6d55577 100644
--- a/hopsworksai/resource_cluster.go
+++ b/hopsworksai/resource_cluster.go
@@ -374,13 +374,6 @@ func clusterSchema() map[string]*schema.Schema {
MaxItems: 1,
Elem: autoscaleSchema(),
},
- "gpu_workers": {
- Description: "Setup auto scaling for gpu nodes.",
- Type: schema.TypeList,
- Optional: true,
- MaxItems: 1,
- Elem: autoscaleSchema(),
- },
},
},
},
@@ -1658,11 +1651,6 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int
// required field
autoscaleConfig.NonGPU = structure.ExpandAutoscaleConfigurationBase(nonGpuConfig[0].(map[string]interface{}))
- gpuConfig := newConfig["gpu_workers"].([]interface{})
- if len(gpuConfig) > 0 {
- autoscaleConfig.GPU = structure.ExpandAutoscaleConfigurationBase(gpuConfig[0].(map[string]interface{}))
- }
-
if err := api.ConfigureAutoscale(ctx, client, clusterId, autoscaleConfig); err != nil {
return diag.FromErr(err)
}
diff --git a/hopsworksai/resource_cluster_test.go b/hopsworksai/resource_cluster_test.go
index f17e33f..f934ce2 100644
--- a/hopsworksai/resource_cluster_test.go
+++ b/hopsworksai/resource_cluster_test.go
@@ -557,38 +557,6 @@ func testAccCluster_Autoscale(t *testing.T, cloud api.CloudProvider) {
ImportState: true,
ImportStateVerify: true,
},
- {
- Config: testAccClusterConfig_Autoscale(cloud, rName, suffix, fmt.Sprintf(`
- autoscale {
- non_gpu_workers {
- instance_type = "%s"
- }
-
- gpu_workers {
- instance_type = "%s"
- }
- }
- `, testWorkerInstanceType1(cloud), testWorkerInstanceTypeWithGPU(cloud))),
- Check: resource.ComposeTestCheckFunc(
- resource.TestCheckResourceAttr(resourceName, "state", api.Running.String()),
- resource.TestCheckResourceAttr(resourceName, "activation_state", api.Stoppable.String()),
- resource.TestCheckResourceAttr(resourceName, "update_state", "none"),
- resource.TestCheckResourceAttr(resourceName, "workers.#", "0"),
- resource.TestCheckResourceAttr(resourceName, "autoscale.0.non_gpu_workers.0.instance_type", testWorkerInstanceType1(cloud)),
- resource.TestCheckResourceAttr(resourceName, "autoscale.0.non_gpu_workers.0.disk_size", strconv.Itoa(defaultAutoscaleConfig.DiskSize)),
- resource.TestCheckResourceAttr(resourceName, "autoscale.0.non_gpu_workers.0.min_workers", strconv.Itoa(defaultAutoscaleConfig.MinWorkers)),
- resource.TestCheckResourceAttr(resourceName, "autoscale.0.non_gpu_workers.0.max_workers", strconv.Itoa(defaultAutoscaleConfig.MaxWorkers)),
- resource.TestCheckResourceAttr(resourceName, "autoscale.0.non_gpu_workers.0.standby_workers", fmt.Sprint(defaultAutoscaleConfig.StandbyWorkers)),
- resource.TestCheckResourceAttr(resourceName, "autoscale.0.non_gpu_workers.0.downscale_wait_time", strconv.Itoa(defaultAutoscaleConfig.DownscaleWaitTime)),
-
- resource.TestCheckResourceAttr(resourceName, "autoscale.0.gpu_workers.0.instance_type", testWorkerInstanceTypeWithGPU(cloud)),
- resource.TestCheckResourceAttr(resourceName, "autoscale.0.gpu_workers.0.disk_size", strconv.Itoa(defaultAutoscaleConfig.DiskSize)),
- resource.TestCheckResourceAttr(resourceName, "autoscale.0.gpu_workers.0.min_workers", strconv.Itoa(defaultAutoscaleConfig.MinWorkers)),
- resource.TestCheckResourceAttr(resourceName, "autoscale.0.gpu_workers.0.max_workers", strconv.Itoa(defaultAutoscaleConfig.MaxWorkers)),
- resource.TestCheckResourceAttr(resourceName, "autoscale.0.gpu_workers.0.standby_workers", fmt.Sprint(defaultAutoscaleConfig.StandbyWorkers)),
- resource.TestCheckResourceAttr(resourceName, "autoscale.0.gpu_workers.0.downscale_wait_time", strconv.Itoa(defaultAutoscaleConfig.DownscaleWaitTime)),
- ),
- },
},
})
}
@@ -821,15 +789,6 @@ func testAccCluster_RonDB_upscale(t *testing.T, cloud api.CloudProvider, managem
})
}
-func testWorkerInstanceTypeWithGPU(cloud api.CloudProvider) string {
- if cloud == api.AWS {
- return "g3s.xlarge"
- } else if cloud == api.AZURE {
- return "Standard_NC6s_v3"
- }
- return ""
-}
-
func testWorkerInstanceType1(cloud api.CloudProvider) string {
return testWorkerInstanceType(cloud, true)
}
@@ -2561,13 +2520,11 @@ func testClusterCreate_RonDB_invalidReplicationFactor(t *testing.T, cloud api.Cl
}
func TestClusterCreate_Autoscale(t *testing.T) {
- testClusterCreate_Autoscale(t, api.AWS, true)
- testClusterCreate_Autoscale(t, api.AZURE, true)
- testClusterCreate_Autoscale(t, api.AWS, false)
- testClusterCreate_Autoscale(t, api.AZURE, false)
+ testClusterCreate_Autoscale(t, api.AWS)
+ testClusterCreate_Autoscale(t, api.AZURE)
}
-func testClusterCreate_Autoscale(t *testing.T, cloud api.CloudProvider, withGpu bool) {
+func testClusterCreate_Autoscale(t *testing.T, cloud api.CloudProvider) {
state := map[string]interface{}{
"name": "cluster",
"head": []interface{}{
@@ -2597,18 +2554,6 @@ func testClusterCreate_Autoscale(t *testing.T, cloud api.CloudProvider, withGpu
},
}
- if withGpu {
- state["autoscale"].([]interface{})[0].(map[string]interface{})["gpu_workers"] = []interface{}{
- map[string]interface{}{
- "instance_type": "gpu-node",
- "disk_size": 200,
- "min_workers": 1,
- "max_workers": 5,
- "standby_workers": 0.4,
- "downscale_wait_time": 100,
- },
- }
- }
if cloud == api.AWS {
state["aws_attributes"] = []interface{}{
map[string]interface{}{
@@ -2668,16 +2613,6 @@ func testClusterCreate_Autoscale(t *testing.T, cloud api.CloudProvider, withGpu
},
},
}
- if withGpu {
- expected.GPU = &api.AutoscaleConfigurationBase{
- InstanceType: "gpu-node",
- DiskSize: 200,
- MinWorkers: 1,
- MaxWorkers: 5,
- StandbyWorkers: 0.4,
- DownscaleWaitTime: 100,
- }
- }
if !reflect.DeepEqual(&expected, output) {
return fmt.Errorf("error while matching:\nexpected %#v \nbut got %#v", expected, output)
}