Diffstat (limited to 'app/models')
-rw-r--r--  app/models/ci/pipeline.rb                   | 10
-rw-r--r--  app/models/ci/pipeline_schedule.rb          | 25
-rw-r--r--  app/models/clusters/applications/knative.rb | 48
-rw-r--r--  app/models/clusters/cluster.rb              |  4
-rw-r--r--  app/models/key.rb                           |  5
5 files changed, 36 insertions(+), 56 deletions(-)
diff --git a/app/models/ci/pipeline.rb b/app/models/ci/pipeline.rb
index 80401ca0a1e..3727a9861aa 100644
--- a/app/models/ci/pipeline.rb
+++ b/app/models/ci/pipeline.rb
@@ -166,6 +166,16 @@ module Ci
         end
       end
 
+      after_transition any => ::Ci::Pipeline.completed_statuses do |pipeline|
+        pipeline.run_after_commit do
+          pipeline.all_merge_requests.each do |merge_request|
+            next unless merge_request.auto_merge_enabled?
+
+            AutoMergeProcessWorker.perform_async(merge_request.id)
+          end
+        end
+      end
+
       after_transition any => [:success, :failed] do |pipeline|
         pipeline.run_after_commit do
           PipelineNotificationWorker.perform_async(pipeline.id)
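
The hook above fans out one job per merge request, and `run_after_commit` defers the enqueue until the surrounding database transaction commits, so AutoMergeProcessWorker never picks up a pipeline state that could still be rolled back. A minimal sketch of that deferral pattern in plain Ruby (FakeTransaction is illustrative, not GitLab's AfterCommitQueue):

    # Illustrative stand-in for AfterCommitQueue's run_after_commit.
    class FakeTransaction
      def initialize
        @after_commit = []
      end

      # Queue work instead of running it immediately.
      def run_after_commit(&block)
        @after_commit << block
      end

      # Callbacks run only once the transaction commits, so any worker
      # enqueued here can rely on the committed record state.
      def commit!
        @after_commit.each(&:call)
      end
    end

    tx = FakeTransaction.new
    tx.run_after_commit { puts "AutoMergeProcessWorker.perform_async(42)" }
    tx.commit! # => prints once, only after the "commit"
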
diff --git a/app/models/ci/pipeline_schedule.rb b/app/models/ci/pipeline_schedule.rb
index c0a0ca9acf6..c40ad39be61 100644
--- a/app/models/ci/pipeline_schedule.rb
+++ b/app/models/ci/pipeline_schedule.rb
@@ -27,9 +27,13 @@ module Ci
     scope :active, -> { where(active: true) }
     scope :inactive, -> { where(active: false) }
+    scope :runnable_schedules, -> { active.where("next_run_at < ?", Time.now) }
+    scope :preloaded, -> { preload(:owner, :project) }
 
     accepts_nested_attributes_for :variables, allow_destroy: true
 
+    alias_attribute :real_next_run, :next_run_at
+
     def owned_by?(current_user)
       owner == current_user
     end
@@ -46,8 +50,14 @@ module Ci
       update_attribute(:active, false)
     end
 
+    ##
+    # The `next_run_at` column is set to the actual execution date of `PipelineScheduleWorker`.
+    # This way, a schedule like `*/1 * * * *` won't be triggered at short intervals
+    # when PipelineScheduleWorker runs irregularly because of Sidekiq Memory Killer.
     def set_next_run_at
-      self.next_run_at = Gitlab::Ci::CronParser.new(cron, cron_timezone).next_time_from(Time.now)
+      self.next_run_at = Gitlab::Ci::CronParser.new(Settings.cron_jobs['pipeline_schedule_worker']['cron'],
+                                                    Time.zone.name)
+                                                .next_time_from(ideal_next_run_at)
     end
 
     def schedule_next_run!
@@ -56,15 +66,14 @@ module Ci
       update_attribute(:next_run_at, nil) # update without validation
     end
 
-    def real_next_run(
-        worker_cron: Settings.cron_jobs['pipeline_schedule_worker']['cron'],
-        worker_time_zone: Time.zone.name)
-      Gitlab::Ci::CronParser.new(worker_cron, worker_time_zone)
-        .next_time_from(next_run_at)
-    end
-
     def job_variables
       variables&.map(&:to_runner_variable) || []
     end
+
+    private
+
+    def ideal_next_run_at
+      Gitlab::Ci::CronParser.new(cron, cron_timezone).next_time_from(Time.now)
+    end
   end
 end
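
The net effect of the `set_next_run_at` change: the schedule's own cron produces an ideal time, which is then rounded forward to the next tick of PipelineScheduleWorker, so no schedule can fire more often than the worker itself runs. A sketch of that arithmetic using the fugit gem as a stand-in for Gitlab::Ci::CronParser (the gem choice and both cron values are assumptions for illustration):

    require 'fugit' # gem install fugit; stand-in for Gitlab::Ci::CronParser

    schedule_cron = '*/1 * * * *'     # what the user configured
    worker_cron   = '3-59/10 * * * *' # example worker cadence, every 10 minutes

    # Step 1: the ideal next run, from the schedule's own cron
    # (this is what ideal_next_run_at computes).
    ideal = Fugit.parse_cron(schedule_cron).next_time(Time.now).to_t

    # Step 2: round forward to the worker's next tick; this becomes next_run_at.
    next_run_at = Fugit.parse_cron(worker_cron).next_time(ideal).to_t

    puts next_run_at # never more frequent than the worker's own cron
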
diff --git a/app/models/clusters/applications/knative.rb b/app/models/clusters/applications/knative.rb
index 9fbf5d8af04..d5a3bd62e3d 100644
--- a/app/models/clusters/applications/knative.rb
+++ b/app/models/clusters/applications/knative.rb
@@ -15,9 +15,6 @@ module Clusters
       include ::Clusters::Concerns::ApplicationVersion
       include ::Clusters::Concerns::ApplicationData
       include AfterCommitQueue
-      include ReactiveCaching
-
-      self.reactive_cache_key = ->(knative) { [knative.class.model_name.singular, knative.id] }
 
       def set_initial_status
         return unless not_installable?
@@ -41,8 +38,6 @@ module Clusters
       scope :for_cluster, -> (cluster) { where(cluster: cluster) }
 
-      after_save :clear_reactive_cache!
-
       def chart
         'knative/knative'
       end
@@ -77,55 +72,12 @@ module Clusters
         ClusterWaitForIngressIpAddressWorker.perform_async(name, id)
       end
 
-      def client
-        cluster.kubeclient.knative_client
-      end
-
-      def services
-        with_reactive_cache do |data|
-          data[:services]
-        end
-      end
-
-      def calculate_reactive_cache
-        { services: read_services, pods: read_pods }
-      end
-
       def ingress_service
         cluster.kubeclient.get_service('istio-ingressgateway', 'istio-system')
       end
 
-      def services_for(ns: namespace)
-        return [] unless services
-        return [] unless ns
-
-        services.select do |service|
-          service.dig('metadata', 'namespace') == ns
-        end
-      end
-
-      def service_pod_details(ns, service)
-        with_reactive_cache do |data|
-          data[:pods].select { |pod| filter_pods(pod, ns, service) }
-        end
-      end
-
       private
 
-      def read_pods
-        cluster.kubeclient.core_client.get_pods.as_json
-      end
-
-      def filter_pods(pod, namespace, service)
-        pod["metadata"]["namespace"] == namespace && pod["metadata"]["labels"]["serving.knative.dev/service"] == service
-      end
-
-      def read_services
-        client.get_services.as_json
-      rescue Kubeclient::ResourceNotFoundError
-        []
-      end
-
       def install_knative_metrics
         ["kubectl apply -f #{METRICS_CONFIG}"] if cluster.application_prometheus_available?
       end
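
For readers unfamiliar with the concern being removed: ReactiveCaching computes a payload out of band, serves reads from the cache, and is invalidated on save (hence the removed `after_save :clear_reactive_cache!`). A self-contained toy version of that read/clear cycle (hypothetical class, not GitLab's ReactiveCaching module, which refreshes via a background worker rather than inline):

    # Toy reactive cache: reads come from the cached payload; clear! forces
    # a recompute on the next read. (GitLab's real concern recomputes in a
    # background worker and yields nothing on a cache miss.)
    class TinyReactiveCache
      def initialize(&calculator)
        @calculator = calculator
        @store = nil
      end

      def with_cache
        @store ||= @calculator.call
        yield @store
      end

      def clear!
        @store = nil
      end
    end

    cache = TinyReactiveCache.new { { services: ['svc-a'], pods: [] } }
    cache.with_cache { |data| puts data[:services].inspect } # => ["svc-a"]
    cache.clear! # the after_save :clear_reactive_cache! equivalent
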
diff --git a/app/models/clusters/cluster.rb b/app/models/clusters/cluster.rb
index 57a1e461b2d..e1d6b2a802b 100644
--- a/app/models/clusters/cluster.rb
+++ b/app/models/clusters/cluster.rb
@@ -223,6 +223,10 @@ module Clusters
       end
     end
 
+    def knative_services_finder(project)
+      @knative_services_finder ||= KnativeServicesFinder.new(self, project)
+    end
+
     private
 
     def instance_domain
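
One subtlety in the new accessor: `||=` memoizes the finder built for the first project passed in, so a later call on the same cluster instance with a different project still returns the original finder. A toy illustration (the Finder struct is hypothetical, not the real KnativeServicesFinder):

    class ToyCluster
      Finder = Struct.new(:cluster, :project) # hypothetical stand-in

      def knative_services_finder(project)
        # Cached on first use; the project argument is ignored afterwards.
        @knative_services_finder ||= Finder.new(self, project)
      end
    end

    cluster = ToyCluster.new
    a = cluster.knative_services_finder(:project_a)
    b = cluster.knative_services_finder(:project_b)
    puts a.equal?(b) # => true, still bound to :project_a
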
diff --git a/app/models/key.rb b/app/models/key.rb
index b097be8cc89..8aa25924c28 100644
--- a/app/models/key.rb
+++ b/app/models/key.rb
@@ -59,6 +59,11 @@ class Key < ApplicationRecord
"key-#{id}"
end
+ # EE overrides this
+ def can_delete?
+ true
+ end
+
# rubocop: disable CodeReuse/ServiceClass
def update_last_used_at
Keys::LastUsedService.new(self).execute
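
`can_delete?` is a CE default that exists to be overridden; GitLab EE commonly layers such overrides with Module#prepend, so the EE method runs first and can fall back with `super`. A generic sketch of the pattern (the EE rule shown is illustrative, not EE's actual policy):

    class Key
      # CE default: any key may be deleted.
      def can_delete?
        true
      end
    end

    module EE
      module KeyOverride
        # A prepended method wins the lookup; `super` reaches the CE default.
        def can_delete?
          return false if externally_managed? # illustrative EE-only rule

          super
        end

        def externally_managed?
          false
        end
      end
    end

    Key.prepend(EE::KeyOverride)
    puts Key.new.can_delete? # => true
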