lib/configgin.rb in configgin-0.19.6 vs lib/configgin.rb in configgin-0.20.0

- old
+ new

@@ -65,21 +65,20 @@
         job.generate(infile, outfile, dns_encoder)
       end
     end
   end
 
-  # Write exported properties to secret and potentially restart affected pods.
+  # Write exported properties to secret and update annotations on importing stateful sets.
   def export_job_properties(jobs)
-    # co-located containers don't get to export properties
+    # Co-located containers don't get to export properties.
     return unless instance_group == ENV["KUBERNETES_CONTAINER_NAME"]
-    # jobs don't export properties
+    # Jobs (errands) don't export properties.
     return unless self_pod['metadata']['ownerReferences'][0]['kind'] == "StatefulSet"
 
     sts = kube_client_stateful_set.get_stateful_set(instance_group, kube_namespace)
 
     # Make sure the secret attached to the stateful set exists.
-    # XXX This should probably be done by fissile via the helm chart.
     secret = Kubeclient::Resource.new
     secret.metadata = {
       name: sts.metadata.name,
       namespace: kube_namespace,
       ownerReferences: [
@@ -98,39 +97,38 @@
     rescue
     end
     secret = kube_client.get_secret(instance_group, kube_namespace)
     secret.data ||= {}
 
+    # version tag changes whenever the chart version or the secrets generation changes
     version_tag = ENV["CONFIGGIN_VERSION_TAG"]
     new_tag = !secret.data[version_tag]
     secret.data = {version_tag => ""} if new_tag # make sure old properties are deleted during upgrade
 
     digests = {}
     jobs.each do |name, job|
-      digests[name] = property_digest(job.exported_properties)
       secret.data["skiff-exported-properties-#{name}"] = Base64.encode64(job.exported_properties.to_json)
+      digests[name] = property_digest(job.exported_properties)
 
-      encoded_digest = Base64.encode64(digests[name])
-
       # Record initial digest values whenever the tag changes, in which case the pod startup
       # order is already controlled by the "CONFIGGIN_IMPORT_#{role}" references to the new
       # tags in the corresponding secrets. There is no annotation when importing this set of
      # initial values because the helm chart doesn't include any annotations, and we don't
       # want to trigger a pod restart by adding them.
+      encoded_digest = Base64.encode64(digests[name])
       if new_tag
         secret.data["skiff-initial-digest-#{name}"] = encoded_digest
       end
       if secret.data["skiff-initial-digest-#{name}"] == encoded_digest
         digests[name] = nil
       end
     end
-
     kube_client.update_secret(secret)
 
-    # Some pods might have depended on the properties exported by this pod; given
-    # the annotations expected on the pods (keyed by the instance group name),
-    # patch the StatefulSets such that they will be restarted.
+    # Some pods might depend on the properties exported by this pod; add annotations
+    # to the template spec of the stateful sets so that the pods will be restarted if
+    # the exported values have changed from the initial values.
     expected_annotations(@job_configs, digests).each_pair do |instance_group_name, digests|
       # Avoid restarting our own pod
       next if instance_group_name == instance_group
 
       begin
@@ -141,20 +139,21 @@
             response = JSON.parse(e.response || '')
           rescue JSON::ParseError
             response = {}
           end
           if response['reason'] == 'NotFound'
-            # The StatefulSet can be missing if we're configured to not have an
-            # optional instance group.
+            # The StatefulSet can be missing if we're configured to not have an optional instance group.
             warn "Skipping patch of non-existant StatefulSet #{instance_group_name}"
             next
           end
-          warn "Error patching #{instance_group_name}: #{response.to_json}"
+          warn "Error fetching stateful set #{instance_group_name}: #{response.to_json}"
           raise
         end
       end
 
+      # Update annotations to match digests for current property values. The stateful set will
+      # only restarts pods when the checksum of the pod spec changes, so no-op "updates" are ok.
       annotations = {}
       sts.spec.template.metadata.annotations.each_pair do |key, value|
         annotations[key] = value
       end
       digests.each_pair do |key, value|
@@ -164,11 +163,10 @@
       kube_client_stateful_set.merge_patch_stateful_set(
         instance_group_name,
         { spec: { template: { metadata: { annotations: annotations } } } },
         kube_namespace
       )
-      warn "Patched StatefulSet #{instance_group_name} for new exported digests"
     end
   end
 
   # Given the active jobs, and a hash of the expected annotations for each,
   # return the annotations we expect to be on each pod based on what properties
@@ -190,10 +188,10 @@
   def kube_namespace
     @kube_namespace ||= File.read("#{SVC_ACC_PATH}/namespace")
   end
 
   def kube_token
-    @kube_token ||= File.read("#{SVC_ACC_PATH}/token")
+    @kube_token ||= ENV['CONFIGGIN_SA_TOKEN'] || File.read("#{SVC_ACC_PATH}/token")
   end
 
   private
 
   def create_kube_client(path: nil, version: 'v1')
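
For context, a minimal, self-contained Ruby sketch of the mechanism the 0.20.0 comments describe. It is not taken from the gem: property_digest's real implementation, the annotation key, and the annotation_patch helper are all assumptions made for illustration. The idea is that each job's exported properties are hashed, the digest is copied into the importing StatefulSet's pod-template annotations, and because the StatefulSet controller only rolls pods when the pod template actually changes, re-applying an unchanged digest is a harmless no-op.

require 'digest'
require 'json'

# Hypothetical stand-in for configgin's property_digest helper; any stable
# digest of the exported-property hash would do here.
def property_digest(properties)
  Digest::SHA1.hexdigest(properties.to_json)
end

# Build the merge-patch body for one importing StatefulSet. Digests that are
# nil (unchanged from the recorded initial value) are skipped, and if nothing
# differs from the annotations already on the pod template, no patch is
# returned, so the StatefulSet controller has nothing to roll.
def annotation_patch(current_annotations, digests)
  annotations = current_annotations.dup
  digests.each_pair do |annotation_key, digest|
    annotations[annotation_key] = digest unless digest.nil?
  end
  return nil if annotations == current_annotations
  { spec: { template: { metadata: { annotations: annotations } } } }
end

# Example with a made-up annotation key: the first call is a no-op, the second
# produces a patch because the exported property (and so its digest) changed.
current = { 'skiff-exported-digest-nats' => property_digest('port' => 4222) }
p annotation_patch(current, 'skiff-exported-digest-nats' => nil)
p annotation_patch(current, 'skiff-exported-digest-nats' => property_digest('port' => 4223))

Resetting a digest to nil when it matches the recorded initial value, as the diff does with the skiff-initial-digest-* entries, is what keeps a fresh deployment from immediately restarting pods that were only just created.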