modules/mu/clouds/google/container_cluster.rb in cloud-mu-3.1.5 vs modules/mu/clouds/google/container_cluster.rb in cloud-mu-3.1.6

- old
+ new

@@ -472,11 +472,10 @@ end MU.log %Q{How to interact with your GKE cluster\nkubectl --kubeconfig "#{kube_conf}" get events --all-namespaces\nkubectl --kubeconfig "#{kube_conf}" get all\nkubectl --kubeconfig "#{kube_conf}" create -f some_k8s_deploy.yml\nkubectl --kubeconfig "#{kube_conf}" get nodes}, MU::SUMMARY end - # Locate an existing ContainerCluster or ContainerClusters and return an array containing matching GCP resource descriptors for those that match. # @return [Array<Hash<String,OpenStruct>>]: The cloud provider's complete descriptions of matching ContainerClusters def self.find(**args) args = MU::Cloud::Google.findLocationArgs(args) @@ -745,19 +744,19 @@ # @param ignoremaster [Boolean]: If true, will remove resources not flagged as originating from this Mu server # @param region [String]: The cloud provider region in which to operate # @return [void] def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {}) - flags["project"] ||= MU::Cloud::Google.defaultProject(credentials) - return if !MU::Cloud::Google::Habitat.isLive?(flags["project"], credentials) + flags["habitat"] ||= MU::Cloud::Google.defaultProject(credentials) + return if !MU::Cloud::Google::Habitat.isLive?(flags["habitat"], credentials) clusters = [] # Make sure we catch regional *and* zone clusters - found = MU::Cloud::Google.container(credentials: credentials).list_project_location_clusters("projects/#{flags['project']}/locations/#{region}") + found = MU::Cloud::Google.container(credentials: credentials).list_project_location_clusters("projects/#{flags['habitat']}/locations/#{region}") clusters.concat(found.clusters) if found and found.clusters MU::Cloud::Google.listAZs(region).each { |az| - found = MU::Cloud::Google.container(credentials: credentials).list_project_location_clusters("projects/#{flags['project']}/locations/#{az}") + found = MU::Cloud::Google.container(credentials: credentials).list_project_location_clusters("projects/#{flags['habitat']}/locations/#{az}") clusters.concat(found.clusters) if found and found.clusters } clusters.uniq.each { |cluster| if !cluster.resource_labels or ( @@ -1095,11 +1094,11 @@ break end } if !match MU.log "No version matching #{cluster['kubernetes']['version']} available, will try floating minor revision", MU::WARN - cluster['kubernetes']['version'].sub!(/^(\d+\.\d+\.).*/i, '\1') + cluster['kubernetes']['version'].sub!(/^(\d+\.\d+)\..*/i, '\1') master_versions.each { |v| if v.match(/^#{Regexp.quote(cluster['kubernetes']['version'])}/) match = true break end @@ -1143,10 +1142,14 @@ end cluster['instance_type'] = MU::Cloud::Google::Server.validateInstanceType(cluster["instance_type"], cluster["region"], project: cluster['project'], credentials: cluster['credentials']) ok = false if cluster['instance_type'].nil? + if !MU::Master.kubectl + MU.log "Since I can't find a kubectl executable, you will have to handle all service account, user, and role bindings manually!", MU::WARN + end + ok end private @@ -1234,10 +1237,10 @@ } # Take this opportunity to ensure that the 'client' service account # used by certificate authentication exists and has appropriate # privilege - if @username and @password + if @username and @password and MU::Master.kubectl File.open(client_binding, "w"){ |k| k.puts <<-EOF kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: