
Commit c3ae2a8

feat(k8s): wait for pool to be ready and add 'nodes' in its output
1 parent 72edd8c commit c3ae2a8

7 files changed: +397 -31 lines
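
For context, a minimal Terraform configuration using the new 'wait_for_pool_ready' flag might look like the sketch below. It mirrors the acceptance-test config added in this commit (testAccCheckScalewayK8SClusterBetaConfigPoolWait); the values are illustrative, not prescriptive.

resource "scaleway_k8s_cluster_beta" "pool" {
  name    = "default-pool"
  version = "1.17.3"
  cni     = "cilium"

  default_pool {
    node_type           = "gp1_xs"
    size                = 1
    min_size            = 1
    max_size            = 1
    container_runtime   = "docker"
    # new in this commit: wait until the default pool reports ready before continuing
    wait_for_pool_ready = true
  }
}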

scaleway/helpers_k8s.go

+44
@@ -37,6 +37,7 @@ type KubeconfigStruct struct {
 const (
 	K8SClusterWaitForReadyTimeout   = 10 * time.Minute
 	K8SClusterWaitForDeletedTimeout = 10 * time.Minute
+	K8SPoolWaitForReadyTimeout      = 10 * time.Minute
 )
 
 func k8sAPIWithRegion(d *schema.ResourceData, m interface{}) (*k8s.API, scw.Region, error) {

@@ -88,6 +89,49 @@ func waitK8SClusterDeleted(k8sAPI *k8s.API, region scw.Region, clusterID string)
 	return fmt.Errorf("Cluster %s has state %s, wants %s", clusterID, cluster.Status.String(), k8s.ClusterStatusDeleted.String())
 }
 
+func waitK8SPoolReady(k8sAPI *k8s.API, region scw.Region, poolID string) error {
+	return k8sAPI.WaitForPool(&k8s.WaitForPoolRequest{
+		PoolID:  poolID,
+		Region:  region,
+		Timeout: scw.DurationPtr(K8SPoolWaitForReadyTimeout),
+	})
+}
+
+// convert a list of nodes to a list of maps
+func convertNodes(res *k8s.ListNodesResponse) []map[string]interface{} {
+	var result []map[string]interface{}
+	for _, node := range res.Nodes {
+		n := make(map[string]interface{})
+		n["name"] = node.Name
+		n["pool_id"] = node.PoolID
+		n["status"] = node.Status.String()
+		if node.PublicIPV4 != nil && node.PublicIPV4.String() != "<nil>" {
+			n["public_ip"] = node.PublicIPV4.String()
+		}
+		if node.PublicIPV6 != nil && node.PublicIPV6.String() != "<nil>" {
+			n["public_ip_v6"] = node.PublicIPV6.String()
+		}
+		result = append(result, n)
+	}
+	return result
+}
+
+func getNodes(k8sAPI *k8s.API, pool *k8s.Pool) (interface{}, error) {
+	req := &k8s.ListNodesRequest{
+		Region:    pool.Region,
+		ClusterID: pool.ClusterID,
+		PoolID:    &pool.ID,
+	}
+
+	nodes, err := k8sAPI.ListNodes(req, scw.WithAllPages())
+
+	if err != nil {
+		return nil, err
+	}
+
+	return convertNodes(nodes), nil
+}
+
 func clusterAutoscalerConfigFlatten(cluster *k8s.Cluster) []map[string]interface{} {
 	autoscalerConfig := map[string]interface{}{}
 	autoscalerConfig["disable_scale_down"] = cluster.AutoscalerConfig.ScaleDownDisabled

scaleway/resource_k8s_cluster_beta.go

+104 -30

@@ -194,6 +194,12 @@ func resourceScalewayK8SClusterBeta() *schema.Resource {
 						k8s.RuntimeCrio.String(),
 					}, false),
 				},
+				"wait_for_pool_ready": {
+					Type:        schema.TypeBool,
+					Optional:    true,
+					Default:     false,
+					Description: "Whether to wait for the pool to be ready",
+				},
 				// Computed elements
 				"pool_id": {
 					Type:        schema.TypeString,

@@ -210,6 +216,39 @@ func resourceScalewayK8SClusterBeta() *schema.Resource {
 					Computed:    true,
 					Description: "The date and time of the last update of the default pool",
 				},
+				"nodes": {
+					Type:     schema.TypeList,
+					Computed: true,
+					Elem: &schema.Resource{
+						Schema: map[string]*schema.Schema{
+							"name": {
+								Type:        schema.TypeString,
+								Computed:    true,
+								Description: "The name of the node",
+							},
+							"pool_id": {
+								Type:        schema.TypeString,
+								Computed:    true,
+								Description: "The pool ID the node belongs to",
+							},
+							"status": {
+								Type:        schema.TypeString,
+								Computed:    true,
+								Description: "The status of the node",
+							},
+							"public_ip": {
+								Type:        schema.TypeString,
+								Computed:    true,
+								Description: "The public IPv4 address of the node",
+							},
+							"public_ip_v6": {
+								Type:        schema.TypeString,
+								Computed:    true,
+								Description: "The public IPv6 address of the node",
+							},
+						},
+					},
+				},
 				"status": {
 					Type:        schema.TypeString,
 					Computed:    true,

@@ -408,51 +447,42 @@ func resourceScalewayK8SClusterBetaCreate(d *schema.ResourceData, m interface{})
 
 	d.SetId(newRegionalId(region, res.ID))
 
-	err = waitK8SClusterReady(k8sAPI, region, res.ID)
+	err = waitK8SClusterReady(k8sAPI, region, res.ID) // wait for the cluster status to be ready
 	if err != nil {
 		return err
 	}
 
+	if d.Get("default_pool.0.wait_for_pool_ready").(bool) { // wait for the pool status to be ready (if specified)
+		pool, err := readDefaultPool(d, m) // ensure that 'default_pool.0.pool_id' is set
+		if err != nil {
+			return err
+		}
+
+		err = waitK8SPoolReady(k8sAPI, region, expandID(pool.ID))
+		if err != nil {
+			return err
+		}
+	}
+
 	return resourceScalewayK8SClusterBetaRead(d, m)
 }
 
 // resourceScalewayK8SClusterBetaDefaultPoolRead is only called after a resourceScalewayK8SClusterBetaCreate
 // thus ensuring the uniqueness of the only pool listed
 func resourceScalewayK8SClusterBetaDefaultPoolRead(d *schema.ResourceData, m interface{}) error {
-	k8sAPI, region, clusterID, err := k8sAPIWithRegionAndID(m, d.Id())
+	k8sAPI, region, _, err := k8sAPIWithRegionAndID(m, d.Id())
 	if err != nil {
 		return err
 	}
 
-	////
-	// Read default Pool
-	////
-
-	var pool *k8s.Pool
-
-	if defaultPoolID, ok := d.GetOk("default_pool.0.pool_id"); ok {
-		poolResp, err := k8sAPI.GetPool(&k8s.GetPoolRequest{
-			Region: region,
-			PoolID: expandID(defaultPoolID.(string)),
-		})
-		if err != nil {
-			return err
-		}
-		pool = poolResp
-	} else {
-		response, err := k8sAPI.ListPools(&k8s.ListPoolsRequest{
-			Region:    region,
-			ClusterID: clusterID,
-		})
-		if err != nil {
-			return err
-		}
-
-		if len(response.Pools) != 1 {
-			return fmt.Errorf("Newly created pool on cluster %s has %d pools instead of 1", clusterID, len(response.Pools))
-		}
+	pool, err := readDefaultPool(d, m)
+	if err != nil {
+		return err
+	}
 
-		pool = response.Pools[0]
+	nodes, err := getNodes(k8sAPI, pool)
+	if err != nil {
+		return err
 	}
 
 	defaultPool := map[string]interface{}{}

@@ -466,6 +496,8 @@ func resourceScalewayK8SClusterBetaDefaultPoolRead(d *schema.ResourceData, m int
 	defaultPool["container_runtime"] = pool.ContainerRuntime
 	defaultPool["created_at"] = pool.CreatedAt.String()
 	defaultPool["updated_at"] = pool.UpdatedAt.String()
+	defaultPool["nodes"] = nodes
+	defaultPool["wait_for_pool_ready"] = d.Get("default_pool.0.wait_for_pool_ready")
 	defaultPool["status"] = pool.Status.String()
 
 	if pool.PlacementGroupID != nil {

@@ -479,6 +511,41 @@ func resourceScalewayK8SClusterBetaDefaultPoolRead(d *schema.ResourceData, m int
 	return nil
 }
 
+func readDefaultPool(d *schema.ResourceData, m interface{}) (*k8s.Pool, error) {
+	k8sAPI, region, clusterID, err := k8sAPIWithRegionAndID(m, d.Id())
+	if err != nil {
+		return nil, err
+	}
+
+	var pool *k8s.Pool
+
+	if defaultPoolID, ok := d.GetOk("default_pool.0.pool_id"); ok {
+		poolResp, err := k8sAPI.GetPool(&k8s.GetPoolRequest{
+			Region: region,
+			PoolID: expandID(defaultPoolID.(string)),
+		})
+		if err != nil {
+			return nil, err
+		}
+		pool = poolResp
+	} else {
+		response, err := k8sAPI.ListPools(&k8s.ListPoolsRequest{
+			Region:    region,
+			ClusterID: clusterID,
+		})
+		if err != nil {
+			return nil, err
+		}
+
+		if len(response.Pools) != 1 {
+			return nil, fmt.Errorf("Newly created pool on cluster %s has %d pools instead of 1", clusterID, len(response.Pools))
+		}
+
+		pool = response.Pools[0]
+	}
+	return pool, nil
+}
+
 func resourceScalewayK8SClusterBetaRead(d *schema.ResourceData, m interface{}) error {
 	k8sAPI, region, clusterID, err := k8sAPIWithRegionAndID(m, d.Id())
 	if err != nil {

@@ -662,6 +729,13 @@ func resourceScalewayK8SClusterBetaDefaultPoolUpdate(d *schema.ResourceData, m i
 				}
 			}
 		}
+
+		if d.Get("default_pool.0.wait_for_pool_ready").(bool) { // wait for the pool to be ready if specified
+			err = waitK8SPoolReady(k8sAPI, region, expandID(defaultPoolID))
+			if err != nil {
+				return err
+			}
+		}
 	}
 
 	return resourceScalewayK8SClusterBetaDefaultPoolRead(d, m)
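
Since 'nodes' is exposed as a computed attribute of 'default_pool' (flattened into state by resourceScalewayK8SClusterBetaDefaultPoolRead above), it can be referenced from configuration once the cluster has been read. A hedged usage sketch follows; the output block and its name are illustrative and not part of this commit, and the attribute keys come from convertNodes (name, pool_id, status, public_ip, public_ip_v6).

# Illustrative only: expose the first node of the default pool as an output.
# public_ip may be empty when the node has no public IPv4, since convertNodes only sets it when present.
output "default_pool_first_node" {
  value = {
    name      = scaleway_k8s_cluster_beta.pool.default_pool[0].nodes[0].name
    status    = scaleway_k8s_cluster_beta.pool.default_pool[0].nodes[0].status
    public_ip = scaleway_k8s_cluster_beta.pool.default_pool[0].nodes[0].public_ip
  }
}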

scaleway/resource_k8s_cluster_beta_test.go

+78
@@ -249,6 +249,66 @@ func TestAccScalewayK8SClusterBetaDefaultPoolRecreate(t *testing.T) {
 	})
 }
 
+func TestAccScalewayK8SClusterBetaDefaultPoolWait(t *testing.T) {
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckScalewayK8SClusterBetaDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccCheckScalewayK8SClusterBetaConfigPoolWait("1.17.3", 1),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckScalewayK8SClusterBetaExists("scaleway_k8s_cluster_beta.pool"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "version", "1.17.3"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "cni", "cilium"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "status", k8s.ClusterStatusReady.String()),
+					resource.TestCheckResourceAttrSet("scaleway_k8s_cluster_beta.pool", "default_pool.0.pool_id"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.size", "1"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.node_type", "gp1_xs"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.min_size", "1"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.max_size", "1"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.status", k8s.PoolStatusReady.String()),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.nodes.0.status", k8s.NodeStatusReady.String()),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.wait_for_pool_ready", "true"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "tags.0", "terraform-test"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "tags.1", "scaleway_k8s_cluster_beta"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "tags.2", "default-pool"),
+				),
+			},
+			{
+				Config: testAccCheckScalewayK8SClusterBetaConfigPoolWait("1.17.3", 2), // add a node and wait for the pool to be ready
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckScalewayK8SClusterBetaExists("scaleway_k8s_cluster_beta.pool"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "status", k8s.ClusterStatusReady.String()),
+					resource.TestCheckResourceAttrSet("scaleway_k8s_cluster_beta.pool", "default_pool.0.pool_id"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.size", "2"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.min_size", "1"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.max_size", "2"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.status", k8s.PoolStatusReady.String()),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.nodes.0.status", k8s.NodeStatusReady.String()),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.nodes.1.status", k8s.NodeStatusReady.String()), // check that the new node has the "ready" status
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.wait_for_pool_ready", "true"),
+				),
+			},
+			{
+				Config: testAccCheckScalewayK8SClusterBetaConfigPoolWait("1.17.3", 1), // remove a node and wait for the pool to be ready
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckScalewayK8SClusterBetaExists("scaleway_k8s_cluster_beta.pool"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "status", k8s.ClusterStatusReady.String()),
+					resource.TestCheckResourceAttrSet("scaleway_k8s_cluster_beta.pool", "default_pool.0.pool_id"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.size", "1"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.min_size", "1"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.max_size", "1"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.status", k8s.PoolStatusReady.String()),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.nodes.0.status", k8s.NodeStatusReady.String()),
+					resource.TestCheckNoResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.nodes.1"), // check that the second node does not exist anymore
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.wait_for_pool_ready", "true"),
+				),
+			},
+		},
+	})
+}
+
 func TestAccScalewayK8SClusterBetaAutoUpgrade(t *testing.T) {
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck: func() { testAccPreCheck(t) },

@@ -411,6 +471,24 @@ resource "scaleway_k8s_cluster_beta" "pool" {
 }`, version)
 }
 
+func testAccCheckScalewayK8SClusterBetaConfigPoolWait(version string, size int) string {
+	return fmt.Sprintf(`
+resource "scaleway_k8s_cluster_beta" "pool" {
+	cni = "cilium"
+	version = "%s"
+	name = "default-pool"
+	default_pool {
+		node_type = "gp1_xs"
+		size = %d
+		min_size = 1
+		max_size = %d
+		container_runtime = "docker"
+		wait_for_pool_ready = true
+	}
+	tags = [ "terraform-test", "scaleway_k8s_cluster_beta", "default-pool" ]
+}`, version, size, size)
+}
+
 func testAccCheckScalewayK8SClusterBetaConfigPoolWithPlacementGroup(version string) string {
 	return fmt.Sprintf(`
 resource "scaleway_instance_placement_group" "pool_placement_group" {