diff --git a/Dockerfile b/Dockerfile index 3f7db141..f531d9e6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -167,6 +167,25 @@ RUN set -eux; \ done RUN set -eux; \ # Copy commonly required runtime shared libraries (no loop) + cp -a /usr/lib64/libcurl.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libnghttp2.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libidn2.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libpsl.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libssh2.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libunistring.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libnettle.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libhogweed.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libgnutls.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libgmp.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libbrotlidec.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libbrotlicommon.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libzstd.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libnss3.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libnssutil3.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libsmime3.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libplc4.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libplds4.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libnspr4.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ cp -a /usr/lib64/libpq.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ cp -a /usr/lib64/libssl.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ cp -a /usr/lib64/libcrypto.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ diff --git a/Gemfile b/Gemfile index 56a4d836..040a1623 100644 --- a/Gemfile +++ b/Gemfile @@ -25,6 
+25,7 @@ gem 'rake', '~> 13.2' gem 'rdoc', '~> 6.15.0' gem 'rubyzip', '~> 2.4', require: 'zip' gem 'swagger-blocks', '~> 3.0.0' +gem 'typhoeus', '~> 1.5' # Persistence gem 'activerecord-import', '~> 2.1' diff --git a/Gemfile.lock b/Gemfile.lock index 5d21bcbe..f62b5900 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -79,6 +79,7 @@ GEM benchmark (0.5.0) bigdecimal (4.0.1) builder (3.3.0) + byebug (12.0.0) childprocess (5.1.0) logger (~> 1.5) chronic (0.10.2) @@ -135,11 +136,15 @@ GEM encryptor (3.0.0) erb (6.0.2) erubi (1.13.1) + ethon (0.15.0) + ffi (>= 1.15.0) factory_bot (6.5.6) activesupport (>= 6.1.0) faker (3.6.0) i18n (>= 1.8.11, < 2) + ffi (1.17.3) ffi (1.17.3-x64-mingw-ucrt) + ffi (1.17.3-x86_64-linux-gnu) ffi-compiler (1.3.2) ffi (>= 1.15.5) rake @@ -231,6 +236,7 @@ GEM logger mime-types-data (~> 3.2025, >= 3.2025.0507) mime-types-data (3.2026.0224) + mini_portile2 (2.8.9) minitest (6.0.2) drb (~> 2.0) prism (~> 1.5) @@ -243,8 +249,13 @@ GEM netrc (0.11.0) newrelic_rpm (9.24.0) nio4r (2.7.5) + nokogiri (1.19.1) + mini_portile2 (~> 2.8.2) + racc (~> 1.4) nokogiri (1.19.1-x64-mingw-ucrt) racc (~> 1.4) + nokogiri (1.19.1-x86_64-linux-gnu) + racc (~> 1.4) ostruct (0.6.3) overcommit (0.68.0) childprocess (>= 0.6.3, < 6) @@ -257,6 +268,8 @@ GEM parser (3.3.10.2) ast (~> 2.4.1) racc + pg (1.6.3) + pg (1.6.3-x86_64-linux) pg_search (2.3.7) activerecord (>= 6.1) activesupport (>= 6.1) @@ -393,6 +406,8 @@ GEM readline sync tsort (0.2.0) + typhoeus (1.5.0) + ethon (>= 0.9.0, < 0.16.0) tzinfo (2.0.6) concurrent-ruby (~> 1.0) unicode-display_width (3.2.0) @@ -416,7 +431,9 @@ GEM zeitwerk (2.7.5) PLATFORMS + aarch64-linux x64-mingw-ucrt + x86_64-linux DEPENDENCIES activejob (= 8.0.2.1) @@ -491,6 +508,7 @@ DEPENDENCIES simplecov (>= 0.21.2) simplecov_json_formatter swagger-blocks (~> 3.0.0) + typhoeus (~> 1.5) uuid (~> 2.3) vcr (~> 6.3) virtus (~> 2.0) diff --git a/app/api/entities/envelope_download.rb b/app/api/entities/envelope_download.rb index c69c2b5d..be56f5fd 
100644 --- a/app/api/entities/envelope_download.rb +++ b/app/api/entities/envelope_download.rb @@ -5,6 +5,9 @@ class EnvelopeDownload < Grape::Entity expose :display_status, as: :status, documentation: { type: 'string', desc: 'Status of download' } + expose :last_published_at, + documentation: { type: 'string', desc: 'Timestamp of the latest publish event included in this download' } + expose :enqueued_at, documentation: { type: 'string', desc: 'When the download was enqueued' }, if: ->(object) { object.pending? } @@ -20,6 +23,10 @@ class EnvelopeDownload < Grape::Entity expose :url, documentation: { type: 'string', desc: 'AWS S3 URL' }, if: ->(object) { object.finished? } + + expose :zip_files, + documentation: { type: 'array', is_array: true, desc: 'ZIP files produced by the workflow' }, + if: ->(object) { object.finished? } end end end diff --git a/app/api/v1/envelopes.rb b/app/api/v1/envelopes.rb index cef605b7..f9d9b4b1 100644 --- a/app/api/v1/envelopes.rb +++ b/app/api/v1/envelopes.rb @@ -12,6 +12,7 @@ require 'v1/revisions' require 'v1/envelope_events' require 'download_envelopes_job' +require 'sync_envelope_download_workflow_status' module API module V1 @@ -79,17 +80,44 @@ class Envelopes < MountableAPI desc 'Returns the envelope download' get do + SyncEnvelopeDownloadWorkflowStatus.call( + envelope_download: @envelope_download + ) + present @envelope_download, with: API::Entities::EnvelopeDownload end desc 'Starts an envelope download' post do - @envelope_download.update!( - enqueued_at: Time.current, - status: :pending - ) + should_enqueue = false + response_status = :ok + + @envelope_download.with_lock do + active_download = @envelope_download.enqueued_at.present? && + (@envelope_download.in_progress? || @envelope_download.pending?) 
+ current_published_at = Envelope.last_publish_event_at(current_community) + last_published_at = @envelope_download.last_published_at || Time.at(0) + + if !active_download && current_published_at&.>(last_published_at) + @envelope_download.update!( + argo_workflow_name: nil, + argo_workflow_namespace: nil, + enqueued_at: Time.current, + finished_at: nil, + internal_error_backtrace: [], + internal_error_message: nil, + last_published_at: current_published_at, + status: :pending, + url: nil, + zip_files: [] + ) + should_enqueue = true + response_status = :created + end + end - DownloadEnvelopesJob.perform_later(@envelope_download.id) + DownloadEnvelopesJob.perform_later(@envelope_download.id) if should_enqueue + status response_status present @envelope_download, with: API::Entities::EnvelopeDownload end end diff --git a/app/jobs/download_envelopes_job.rb b/app/jobs/download_envelopes_job.rb index b330c82d..559aa0c6 100644 --- a/app/jobs/download_envelopes_job.rb +++ b/app/jobs/download_envelopes_job.rb @@ -1,4 +1,4 @@ -require 'download_envelopes' +require 'submit_envelope_download_workflow' require 'envelope_download' # Create a ZIP archive contaning all of the envelopes from a certain community, @@ -10,7 +10,7 @@ def perform(envelope_download_id) envelope_download = EnvelopeDownload.find_by(id: envelope_download_id) return unless envelope_download - DownloadEnvelopes.call(envelope_download:) + SubmitEnvelopeDownloadWorkflow.call(envelope_download:) rescue StandardError => e Airbrake.notify(e, envelope_download_id:) raise e diff --git a/app/models/envelope.rb b/app/models/envelope.rb index e50b385a..0919185f 100644 --- a/app/models/envelope.rb +++ b/app/models/envelope.rb @@ -118,6 +118,12 @@ def self.select_scope(include_deleted = nil) end end + def self.last_publish_event_at(envelope_community) + EnvelopeVersion + .where(item_type: 'Envelope', envelope_community_id: envelope_community.id) + .maximum(:created_at) + end + def envelope_community_name 
envelope_community.name end diff --git a/app/services/argo_workflows_client.rb b/app/services/argo_workflows_client.rb new file mode 100644 index 00000000..0a7d7030 --- /dev/null +++ b/app/services/argo_workflows_client.rb @@ -0,0 +1,56 @@ +require 'argo_workflows_api_client' +require 'uri' + +class ArgoWorkflowsClient + attr_reader :namespace + + def initialize(configuration: build_configuration) + @namespace = ENV.fetch('ARGO_WORKFLOWS_NAMESPACE') + @workflow_service_api = ArgoWorkflowsApiClient::WorkflowServiceApi.new( + ArgoWorkflowsApiClient::ApiClient.new(configuration) + ) + end + + def get_workflow(name:) + @workflow_service_api.workflow_service_get_workflow( + namespace, + name, + return_type: 'Object' + ) + end + + def submit_workflow(template_name:, parameters:, generate_name:) + @workflow_service_api.workflow_service_submit_workflow( + { + namespace:, + resourceKind: 'WorkflowTemplate', + resourceName: template_name, + submitOptions: { + generateName: generate_name, + parameters: parameters.map { |key, value| "#{key}=#{value}" } + } + }, + namespace, + return_type: 'Object' + ) + end + + private + + def build_configuration + base_uri = URI.parse(ENV.fetch('ARGO_WORKFLOWS_BASE_URL')) + + ArgoWorkflowsApiClient::Configuration.new.tap do |config| + config.scheme = base_uri.scheme + config.host = [base_uri.host, base_uri.port].compact.join(':') + config.base_path = base_uri.path + config.api_key['Authorization'] = ENV.fetch('ARGO_WORKFLOWS_TOKEN') + config.api_key_prefix['Authorization'] = 'Bearer' + config.timeout = ENV.fetch('ARGO_WORKFLOWS_TIMEOUT_SECONDS', 30).to_i + + # We run this in a secure environment, so it can be disabled + config.verify_ssl = false + config.verify_ssl_host = false + end + end +end diff --git a/app/services/submit_envelope_download_workflow.rb b/app/services/submit_envelope_download_workflow.rb new file mode 100644 index 00000000..45d2bcf2 --- /dev/null +++ b/app/services/submit_envelope_download_workflow.rb @@ -0,0 +1,87 @@ 
+require 'argo_workflows_client' + +class SubmitEnvelopeDownloadWorkflow + def self.call(envelope_download:) + new(envelope_download).call + end + + attr_reader :envelope_download + + def initialize(envelope_download) + @envelope_download = envelope_download + end + + def call + envelope_download.with_lock do + return envelope_download if workflow_already_started? + + workflow = client.submit_workflow( + template_name: ENV.fetch('ARGO_WORKFLOWS_TEMPLATE_NAME'), + generate_name: "#{community_name.tr('_', '-')}-download-", + parameters: + ) + workflow_name = workflow.dig(:metadata, :name) + raise 'Argo workflow submission did not return a workflow name' if workflow_name.blank? + + envelope_download.update!( + argo_workflow_name: workflow_name, + argo_workflow_namespace: client.namespace, + finished_at: nil, + internal_error_backtrace: [], + internal_error_message: nil, + started_at: Time.current, + status: :in_progress, + zip_files: [], + url: nil + ) + end + rescue StandardError => e + envelope_download.update!( + argo_workflow_name: nil, + argo_workflow_namespace: nil, + finished_at: Time.current, + internal_error_backtrace: Array(e.backtrace), + internal_error_message: e.message, + status: :finished, + zip_files: [], + url: nil + ) + raise + end + + private + + def client + @client ||= ArgoWorkflowsClient.new + end + + def community_name + envelope_download.envelope_community.name + end + + def destination_prefix + "#{community_name}/downloads/#{envelope_download.id}" + end + + def parameters + { + 'batch-size' => ENV.fetch('ARGO_WORKFLOWS_BATCH_SIZE', '25000'), + 'aws-region' => ENV.fetch('AWS_REGION'), + 'destination-bucket' => ENV.fetch('ENVELOPE_DOWNLOADS_BUCKET'), + 'destination-prefix' => destination_prefix, + 'environment' => MR.env, + 'max-uncompressed-zip-size-bytes' => ENV.fetch( + 'ARGO_WORKFLOWS_MAX_UNCOMPRESSED_ZIP_SIZE_BYTES', + (200 * 1024 * 1024).to_s + ), + 'max-workers' => ENV.fetch('ARGO_WORKFLOWS_MAX_WORKERS', '4'), + 'source-bucket' => 
ENV.fetch('ENVELOPE_GRAPHS_BUCKET'), + 'source-prefix' => community_name, + 'task-image' => ENV.fetch('ARGO_WORKFLOWS_TASK_IMAGE') + } + end + + def workflow_already_started? + envelope_download.in_progress? && envelope_download.argo_workflow_name.present? + end +end diff --git a/app/services/sync_envelope_download_workflow_status.rb b/app/services/sync_envelope_download_workflow_status.rb new file mode 100644 index 00000000..1ea018a4 --- /dev/null +++ b/app/services/sync_envelope_download_workflow_status.rb @@ -0,0 +1,142 @@ +require 'argo_workflows_client' +require 'aws-sdk-s3' +require 'json' + +class SyncEnvelopeDownloadWorkflowStatus + SUCCESS_PHASE = 'Succeeded'.freeze + FAILURE_PHASES = %w[Error Failed].freeze + RUNNING_PHASE = 'Running'.freeze + + def self.call(envelope_download:) + new(envelope_download).call + end + + attr_reader :envelope_download + + def initialize(envelope_download) + @envelope_download = envelope_download + end + + def call + return envelope_download if envelope_download.argo_workflow_name.blank? + return envelope_download if envelope_download.finished? && envelope_download.zip_files.present? 
+ + workflow = client.get_workflow(name: envelope_download.argo_workflow_name) + status = workflow.fetch(:status, {}) + phase = status[:phase] + + if phase == SUCCESS_PHASE + mark_success!(workflow:, status:) + elsif FAILURE_PHASES.include?(phase) + mark_failure!(status) + elsif phase == RUNNING_PHASE + mark_in_progress!(status) + end + + envelope_download + rescue ArgoWorkflowsApiClient::ApiError => e + mark_missing_workflow_as_failure!(e) if workflow_not_found?(e) + MR.logger.warn("Unable to sync Argo workflow #{envelope_download.argo_workflow_name}: #{e.message}") + envelope_download + end + + private + + def client + @client ||= ArgoWorkflowsClient.new + end + + def community_name + envelope_download.envelope_community.name + end + + def destination_bucket + ENV.fetch('ENVELOPE_DOWNLOADS_BUCKET') + end + + def mark_failure!(status) + envelope_download.update!( + finished_at: parse_time(status[:finishedAt]) || Time.current, + internal_error_backtrace: [], + internal_error_message: status[:message] || "Argo workflow #{status[:phase].to_s.downcase}", + status: :finished, + zip_files: [], + url: nil + ) + end + + def mark_missing_workflow_as_failure!(error) + envelope_download.update!( + argo_workflow_name: nil, + argo_workflow_namespace: nil, + finished_at: Time.current, + internal_error_backtrace: [], + internal_error_message: "Argo workflow not found: #{error.message}", + status: :finished, + zip_files: [], + url: nil + ) + end + + def mark_in_progress!(status) + envelope_download.update!( + started_at: parse_time(status[:startedAt]) || envelope_download.started_at || Time.current, + status: :in_progress + ) + end + + def mark_success!(workflow:, status:) + manifest = output_manifest(workflow:, status:) + zip_files = manifest.fetch('zip_files', []) + + if zip_files.present? 
+ envelope_download.update!( + finished_at: parse_time(status[:finishedAt]) || Time.current, + internal_error_backtrace: [], + internal_error_message: nil, + status: :finished, + url: public_url_for(zip_files.first), + zip_files: + ) + else + envelope_download.update!( + finished_at: parse_time(status[:finishedAt]) || Time.current, + internal_error_backtrace: [], + internal_error_message: 'Argo workflow succeeded but did not return any ZIP files', + status: :finished, + zip_files: [], + url: nil + ) + end + end + + def parse_time(value) + Time.zone.parse(value) if value.present? + end + + def s3_client + @s3_client ||= Aws::S3::Client.new(region: ENV.fetch('AWS_REGION')) + end + + def output_manifest(workflow:, status:) + workflow_name = workflow.dig(:metadata, :name) + return {} if workflow_name.blank? + + parameters = status.dig(:nodes, workflow_name.to_sym, :outputs, :parameters) || [] + parameter = parameters.find { |item| item[:name] == 'zip-manifest' } + return {} unless parameter + + JSON.parse(parameter.fetch(:value)) + end + + def public_url_for(key) + Aws::S3::Resource.new(region: ENV.fetch('AWS_REGION')) + .bucket(destination_bucket) + .object(key) + .public_url + end + + def workflow_not_found?(error) + error.respond_to?(:code) && error.code.to_i == 404 + end +end diff --git a/db/migrate/20260306120000_add_argo_workflow_fields_to_envelope_downloads.rb b/db/migrate/20260306120000_add_argo_workflow_fields_to_envelope_downloads.rb new file mode 100644 index 00000000..bbc35fec --- /dev/null +++ b/db/migrate/20260306120000_add_argo_workflow_fields_to_envelope_downloads.rb @@ -0,0 +1,7 @@ +class AddArgoWorkflowFieldsToEnvelopeDownloads < ActiveRecord::Migration[8.0] + def change + add_column :envelope_downloads, :argo_workflow_name, :string + add_column :envelope_downloads, :argo_workflow_namespace, :string + add_column :envelope_downloads, :zip_files, :jsonb, default: [], null: false + end +end diff --git 
a/db/migrate/20260319120000_add_last_published_at_to_envelope_downloads.rb b/db/migrate/20260319120000_add_last_published_at_to_envelope_downloads.rb new file mode 100644 index 00000000..bea38550 --- /dev/null +++ b/db/migrate/20260319120000_add_last_published_at_to_envelope_downloads.rb @@ -0,0 +1,5 @@ +class AddLastPublishedAtToEnvelopeDownloads < ActiveRecord::Migration[8.0] + def change + add_column :envelope_downloads, :last_published_at, :datetime + end +end diff --git a/db/structure.sql b/db/structure.sql index fb410fb0..c0e20716 100644 --- a/db/structure.sql +++ b/db/structure.sql @@ -1,3 +1,8 @@ +\restrict 0dy55iEgxshkrNexCflHQqeKePQeKS9cgNddI1yhNU2MlrkmlYja0aF7oIYJK1j + +-- Dumped from database version 16.13 (Debian 16.13-1.pgdg13+1) +-- Dumped by pg_dump version 16.13 + SET statement_timeout = 0; SET lock_timeout = 0; SET idle_in_transaction_session_timeout = 0; @@ -326,7 +331,11 @@ CREATE TABLE public.envelope_downloads ( created_at timestamp(6) without time zone NOT NULL, updated_at timestamp(6) without time zone NOT NULL, status character varying DEFAULT 'pending'::character varying NOT NULL, - enqueued_at timestamp(6) without time zone + enqueued_at timestamp(6) without time zone, + argo_workflow_name character varying, + argo_workflow_namespace character varying, + zip_files jsonb DEFAULT '[]'::jsonb NOT NULL, + last_published_at timestamp(6) without time zone ); @@ -508,7 +517,24 @@ CREATE TABLE public.indexed_envelope_resources ( payload jsonb DEFAULT '"{}"'::jsonb NOT NULL, public_record boolean DEFAULT true NOT NULL, "search:resourcePublishType" character varying, - publication_status integer DEFAULT 0 NOT NULL + publication_status integer DEFAULT 0 NOT NULL, + "ceterms:name" character varying, + "rdfs:label" character varying, + "rdfs:label_en" character varying, + "rdfs:label_es" character varying, + "skos:note" character varying, + "skos:note_fr_us" character varying, + "skos:note_nl_nl" character varying, + "ceterms:globalJurisdiction" 
boolean[] DEFAULT '{}'::boolean[] NOT NULL, + "ceterms:temporalCoverage" date[] DEFAULT '{}'::date[] NOT NULL, + "ceterms:startTime" timestamp(6) without time zone[] DEFAULT '{}'::timestamp without time zone[] NOT NULL, + "ceterms:weight" double precision[] DEFAULT '{}'::double precision[] NOT NULL, + "ceterms:medianEarnings" integer[] DEFAULT '{}'::integer[] NOT NULL, + "ceterms:inLanguage" character varying[] DEFAULT '{}'::character varying[] NOT NULL, + "ceterms:email" character varying, + "ceterms:telephone" character varying, + "ceterms:contactType" character varying, + "ceterms:contactType_en" character varying ); @@ -1136,6 +1162,34 @@ CREATE INDEX envelope_resources_fts_trigram_idx ON public.envelope_resources USI CREATE INDEX envelopes_resources_id_idx ON public.envelopes USING btree (((processed_resource ->> '@id'::text))); +-- +-- Name: i_ctdl_ceterms_contactType_en_fts; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX "i_ctdl_ceterms_contactType_en_fts" ON public.indexed_envelope_resources USING gin (to_tsvector('english'::regconfig, translate(("ceterms:contactType_en")::text, '/.'::text, ' '::text))); + + +-- +-- Name: i_ctdl_ceterms_contactType_en_trgm; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX "i_ctdl_ceterms_contactType_en_trgm" ON public.indexed_envelope_resources USING gin ("ceterms:contactType_en" public.gin_trgm_ops); + + +-- +-- Name: i_ctdl_ceterms_contactType_fts; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX "i_ctdl_ceterms_contactType_fts" ON public.indexed_envelope_resources USING gin (to_tsvector('english'::regconfig, translate(("ceterms:contactType")::text, '/.'::text, ' '::text))); + + +-- +-- Name: i_ctdl_ceterms_contactType_trgm; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX "i_ctdl_ceterms_contactType_trgm" ON public.indexed_envelope_resources USING gin ("ceterms:contactType" public.gin_trgm_ops); + + -- -- Name: i_ctdl_ceterms_ctid; Type: INDEX; Schema: public; Owner: - -- @@ -1150,6 
+1204,90 @@ CREATE UNIQUE INDEX i_ctdl_ceterms_ctid ON public.indexed_envelope_resources USI CREATE INDEX i_ctdl_ceterms_ctid_trgm ON public.indexed_envelope_resources USING gin ("ceterms:ctid" public.gin_trgm_ops); +-- +-- Name: i_ctdl_ceterms_email_fts; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX i_ctdl_ceterms_email_fts ON public.indexed_envelope_resources USING gin (to_tsvector('english'::regconfig, translate(("ceterms:email")::text, '/.'::text, ' '::text))); + + +-- +-- Name: i_ctdl_ceterms_email_trgm; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX i_ctdl_ceterms_email_trgm ON public.indexed_envelope_resources USING gin ("ceterms:email" public.gin_trgm_ops); + + +-- +-- Name: i_ctdl_ceterms_globalJurisdiction; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX "i_ctdl_ceterms_globalJurisdiction" ON public.indexed_envelope_resources USING gin ("ceterms:globalJurisdiction"); + + +-- +-- Name: i_ctdl_ceterms_inLanguage; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX "i_ctdl_ceterms_inLanguage" ON public.indexed_envelope_resources USING gin ("ceterms:inLanguage"); + + +-- +-- Name: i_ctdl_ceterms_medianEarnings; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX "i_ctdl_ceterms_medianEarnings" ON public.indexed_envelope_resources USING gin ("ceterms:medianEarnings"); + + +-- +-- Name: i_ctdl_ceterms_name_fts; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX i_ctdl_ceterms_name_fts ON public.indexed_envelope_resources USING gin (to_tsvector('english'::regconfig, translate(("ceterms:name")::text, '/.'::text, ' '::text))); + + +-- +-- Name: i_ctdl_ceterms_name_trgm; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX i_ctdl_ceterms_name_trgm ON public.indexed_envelope_resources USING gin ("ceterms:name" public.gin_trgm_ops); + + +-- +-- Name: i_ctdl_ceterms_startTime; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX "i_ctdl_ceterms_startTime" ON public.indexed_envelope_resources USING gin 
("ceterms:startTime"); + + +-- +-- Name: i_ctdl_ceterms_telephone_fts; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX i_ctdl_ceterms_telephone_fts ON public.indexed_envelope_resources USING gin (to_tsvector('english'::regconfig, translate(("ceterms:telephone")::text, '/.'::text, ' '::text))); + + +-- +-- Name: i_ctdl_ceterms_telephone_trgm; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX i_ctdl_ceterms_telephone_trgm ON public.indexed_envelope_resources USING gin ("ceterms:telephone" public.gin_trgm_ops); + + +-- +-- Name: i_ctdl_ceterms_temporalCoverage; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX "i_ctdl_ceterms_temporalCoverage" ON public.indexed_envelope_resources USING gin ("ceterms:temporalCoverage"); + + +-- +-- Name: i_ctdl_ceterms_weight; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX i_ctdl_ceterms_weight ON public.indexed_envelope_resources USING gin ("ceterms:weight"); + + -- -- Name: i_ctdl_envelope_resource_id; Type: INDEX; Schema: public; Owner: - -- @@ -1178,6 +1316,48 @@ CREATE INDEX i_ctdl_id_trgm ON public.indexed_envelope_resources USING gin ("@id CREATE INDEX i_ctdl_public_record ON public.indexed_envelope_resources USING btree (public_record); +-- +-- Name: i_ctdl_rdfs_label_en_fts; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX i_ctdl_rdfs_label_en_fts ON public.indexed_envelope_resources USING gin (to_tsvector('english'::regconfig, translate(("rdfs:label_en")::text, '/.'::text, ' '::text))); + + +-- +-- Name: i_ctdl_rdfs_label_en_trgm; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX i_ctdl_rdfs_label_en_trgm ON public.indexed_envelope_resources USING gin ("rdfs:label_en" public.gin_trgm_ops); + + +-- +-- Name: i_ctdl_rdfs_label_es_fts; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX i_ctdl_rdfs_label_es_fts ON public.indexed_envelope_resources USING gin (to_tsvector('spanish'::regconfig, translate(("rdfs:label_es")::text, '/.'::text, ' '::text))); + + +-- +-- Name: 
i_ctdl_rdfs_label_es_trgm; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX i_ctdl_rdfs_label_es_trgm ON public.indexed_envelope_resources USING gin ("rdfs:label_es" public.gin_trgm_ops); + + +-- +-- Name: i_ctdl_rdfs_label_fts; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX i_ctdl_rdfs_label_fts ON public.indexed_envelope_resources USING gin (to_tsvector('english'::regconfig, translate(("rdfs:label")::text, '/.'::text, ' '::text))); + + +-- +-- Name: i_ctdl_rdfs_label_trgm; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX i_ctdl_rdfs_label_trgm ON public.indexed_envelope_resources USING gin ("rdfs:label" public.gin_trgm_ops); + + -- -- Name: i_ctdl_search_ownedBy; Type: INDEX; Schema: public; Owner: - -- @@ -1227,6 +1407,48 @@ CREATE INDEX "i_ctdl_search_recordUpdated_desc" ON public.indexed_envelope_resou CREATE INDEX "i_ctdl_search_resourcePublishType" ON public.indexed_envelope_resources USING btree ("search:resourcePublishType"); +-- +-- Name: i_ctdl_skos_note_fr_us_fts; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX i_ctdl_skos_note_fr_us_fts ON public.indexed_envelope_resources USING gin (to_tsvector('french'::regconfig, translate(("skos:note_fr_us")::text, '/.'::text, ' '::text))); + + +-- +-- Name: i_ctdl_skos_note_fr_us_trgm; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX i_ctdl_skos_note_fr_us_trgm ON public.indexed_envelope_resources USING gin ("skos:note_fr_us" public.gin_trgm_ops); + + +-- +-- Name: i_ctdl_skos_note_fts; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX i_ctdl_skos_note_fts ON public.indexed_envelope_resources USING gin (to_tsvector('english'::regconfig, translate(("skos:note")::text, '/.'::text, ' '::text))); + + +-- +-- Name: i_ctdl_skos_note_nl_nl_fts; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX i_ctdl_skos_note_nl_nl_fts ON public.indexed_envelope_resources USING gin (to_tsvector('dutch'::regconfig, translate(("skos:note_nl_nl")::text, '/.'::text, ' 
'::text))); + + +-- +-- Name: i_ctdl_skos_note_nl_nl_trgm; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX i_ctdl_skos_note_nl_nl_trgm ON public.indexed_envelope_resources USING gin ("skos:note_nl_nl" public.gin_trgm_ops); + + +-- +-- Name: i_ctdl_skos_note_trgm; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX i_ctdl_skos_note_trgm ON public.indexed_envelope_resources USING gin ("skos:note" public.gin_trgm_ops); + + -- -- Name: i_ctdl_type; Type: INDEX; Schema: public; Owner: - -- @@ -1828,10 +2050,14 @@ ALTER TABLE ONLY public.envelopes -- PostgreSQL database dump complete -- +\unrestrict 0dy55iEgxshkrNexCflHQqeKePQeKS9cgNddI1yhNU2MlrkmlYja0aF7oIYJK1j + SET search_path TO "$user", public; INSERT INTO "schema_migrations" (version) VALUES +('20260319120000'), ('20260310005238'), +('20260306120000'), ('20251022205617'), ('20250925025616'), ('20250922224518'), diff --git a/lib/argo_workflows_api_client.rb b/lib/argo_workflows_api_client.rb new file mode 100644 index 00000000..28632c9b --- /dev/null +++ b/lib/argo_workflows_api_client.rb @@ -0,0 +1,5 @@ +require 'argo_workflows_api_client/api_client' +require 'argo_workflows_api_client/api_error' +require 'argo_workflows_api_client/version' +require 'argo_workflows_api_client/configuration' +require 'argo_workflows_api_client/api/workflow_service_api' diff --git a/lib/argo_workflows_api_client/api/workflow_service_api.rb b/lib/argo_workflows_api_client/api/workflow_service_api.rb new file mode 100644 index 00000000..24cdf4f6 --- /dev/null +++ b/lib/argo_workflows_api_client/api/workflow_service_api.rb @@ -0,0 +1,1277 @@ +=begin +#Argo Workflows API + +#Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. 
For more information, please see https://argo-workflows.readthedocs.io/en/latest/ + +OpenAPI spec version: VERSION + +Generated by: https://github.com/swagger-api/swagger-codegen.git +Swagger Codegen version: 3.0.78 +=end + +module ArgoWorkflowsApiClient + class WorkflowServiceApi + attr_accessor :api_client + + def initialize(api_client = ApiClient.default) + @api_client = api_client + end + # @param body + # @param namespace + # @param [Hash] opts the optional parameters + # @return [IoArgoprojWorkflowV1alpha1Workflow] + def workflow_service_create_workflow(body, namespace, opts = {}) + data, _status_code, _headers = workflow_service_create_workflow_with_http_info(body, namespace, opts) + data + end + + # @param body + # @param namespace + # @param [Hash] opts the optional parameters + # @return [Array<(IoArgoprojWorkflowV1alpha1Workflow, Integer, Hash)>] IoArgoprojWorkflowV1alpha1Workflow data, response status code and response headers + def workflow_service_create_workflow_with_http_info(body, namespace, opts = {}) + if @api_client.config.debugging + @api_client.config.logger.debug 'Calling API: WorkflowServiceApi.workflow_service_create_workflow ...' + end + # verify the required parameter 'body' is set + if @api_client.config.client_side_validation && body.nil? + fail ArgumentError, "Missing the required parameter 'body' when calling WorkflowServiceApi.workflow_service_create_workflow" + end + # verify the required parameter 'namespace' is set + if @api_client.config.client_side_validation && namespace.nil? 
+ fail ArgumentError, "Missing the required parameter 'namespace' when calling WorkflowServiceApi.workflow_service_create_workflow" + end + # resource path + local_var_path = '/api/v1/workflows/{namespace}'.sub('{' + 'namespace' + '}', namespace.to_s) + + # query parameters + query_params = opts[:query_params] || {} + + # header parameters + header_params = opts[:header_params] || {} + # HTTP header 'Accept' (if needed) + header_params['Accept'] = @api_client.select_header_accept(['application/json']) + # HTTP header 'Content-Type' + header_params['Content-Type'] = @api_client.select_header_content_type(['application/json']) + + # form parameters + form_params = opts[:form_params] || {} + + # http body (model) + post_body = opts[:body] || @api_client.object_to_http_body(body) + + return_type = opts[:return_type] || 'IoArgoprojWorkflowV1alpha1Workflow' + + auth_names = opts[:auth_names] || ['BearerToken'] + data, status_code, headers = @api_client.call_api(:POST, local_var_path, + :header_params => header_params, + :query_params => query_params, + :form_params => form_params, + :body => post_body, + :auth_names => auth_names, + :return_type => return_type) + + if @api_client.config.debugging + @api_client.config.logger.debug "API called: WorkflowServiceApi#workflow_service_create_workflow\nData: #{data.inspect}\nStatus code: #{status_code}\nHeaders: #{headers}" + end + return data, status_code, headers + end + # @param namespace + # @param name + # @param [Hash] opts the optional parameters + # @option opts [String] :delete_options_grace_period_seconds The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. +optional. + # @option opts [String] :delete_options_preconditions_uid Specifies the target UID. +optional. 
+ # @option opts [String] :delete_options_preconditions_resource_version Specifies the target ResourceVersion +optional. + # @option opts [BOOLEAN] :delete_options_orphan_dependents Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. +optional. + # @option opts [String] :delete_options_propagation_policy Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. +optional. + # @option opts [Array] :delete_options_dry_run When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed +optional +listType=atomic. + # @option opts [BOOLEAN] :delete_options_ignore_store_read_error_with_cluster_breaking_potential if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. 
# Deletes a workflow.
#
# @param namespace [String] namespace of the workflow (required)
# @param name [String] name of the workflow (required)
# @param [Hash] opts the optional parameters (full list on
#   workflow_service_delete_workflow_with_http_info)
# @option opts [BOOLEAN] :force
# @return [IoArgoprojWorkflowV1alpha1WorkflowDeleteResponse]
def workflow_service_delete_workflow(namespace, name, opts = {})
  # Discard status code and headers; callers that need them use the
  # _with_http_info variant directly.
  workflow_service_delete_workflow_with_http_info(namespace, name, opts).first
end

# Deletes a workflow and returns the payload together with the HTTP
# status code and response headers.
#
# @param namespace [String] namespace of the workflow (required)
# @param name [String] name of the workflow (required)
# @param [Hash] opts the optional parameters
# @option opts [String] :delete_options_grace_period_seconds Seconds before the object is deleted; zero deletes immediately; nil uses the type default. +optional.
# @option opts [String] :delete_options_preconditions_uid Target UID precondition. +optional.
# @option opts [String] :delete_options_preconditions_resource_version Target ResourceVersion precondition. +optional.
# @option opts [BOOLEAN] :delete_options_orphan_dependents Deprecated: use PropagationPolicy; mutually exclusive with it. +optional.
# @option opts [String] :delete_options_propagation_policy Garbage-collection policy: 'Orphan', 'Background' or 'Foreground'. +optional.
# @option opts [Array] :delete_options_dry_run When present, modifications are not persisted; valid value: All. +optional.
# @option opts [BOOLEAN] :delete_options_ignore_store_read_error_with_cluster_breaking_potential Opt-in unsafe deletion of corrupt resources; skips finalizers and precondition checks. +optional.
# @option opts [BOOLEAN] :force
# @return [Array<(IoArgoprojWorkflowV1alpha1WorkflowDeleteResponse, Integer, Hash)>] payload, status code, headers
def workflow_service_delete_workflow_with_http_info(namespace, name, opts = {})
  if @api_client.config.debugging
    @api_client.config.logger.debug 'Calling API: WorkflowServiceApi.workflow_service_delete_workflow ...'
  end
  if @api_client.config.client_side_validation
    # Both path parameters are mandatory.
    fail ArgumentError, "Missing the required parameter 'namespace' when calling WorkflowServiceApi.workflow_service_delete_workflow" if namespace.nil?
    fail ArgumentError, "Missing the required parameter 'name' when calling WorkflowServiceApi.workflow_service_delete_workflow" if name.nil?
  end

  local_var_path = '/api/v1/workflows/{namespace}/{name}'
                   .sub('{namespace}', namespace.to_s)
                   .sub('{name}', name.to_s)

  query_params = opts[:query_params] || {}
  # Wire-format query name => opts key, preserving the original emission order.
  {
    :'deleteOptions.gracePeriodSeconds' => :'delete_options_grace_period_seconds',
    :'deleteOptions.preconditions.uid' => :'delete_options_preconditions_uid',
    :'deleteOptions.preconditions.resourceVersion' => :'delete_options_preconditions_resource_version',
    :'deleteOptions.orphanDependents' => :'delete_options_orphan_dependents',
    :'deleteOptions.propagationPolicy' => :'delete_options_propagation_policy'
  }.each do |wire_name, opt_key|
    query_params[wire_name] = opts[opt_key] unless opts[opt_key].nil?
  end
  unless opts[:'delete_options_dry_run'].nil?
    # dryRun is a repeated parameter and needs collection-format handling.
    query_params[:'deleteOptions.dryRun'] = @api_client.build_collection_param(opts[:'delete_options_dry_run'], :multi)
  end
  unless opts[:'delete_options_ignore_store_read_error_with_cluster_breaking_potential'].nil?
    query_params[:'deleteOptions.ignoreStoreReadErrorWithClusterBreakingPotential'] = opts[:'delete_options_ignore_store_read_error_with_cluster_breaking_potential']
  end
  query_params[:force] = opts[:force] unless opts[:force].nil?

  header_params = opts[:header_params] || {}
  header_params['Accept'] = @api_client.select_header_accept(['application/json'])

  form_params = opts[:form_params] || {}
  post_body = opts[:body]
  return_type = opts[:return_type] || 'IoArgoprojWorkflowV1alpha1WorkflowDeleteResponse'
  auth_names = opts[:auth_names] || ['BearerToken']

  data, status_code, headers = @api_client.call_api(:DELETE, local_var_path,
                                                    header_params: header_params,
                                                    query_params: query_params,
                                                    form_params: form_params,
                                                    body: post_body,
                                                    auth_names: auth_names,
                                                    return_type: return_type)

  if @api_client.config.debugging
    @api_client.config.logger.debug "API called: WorkflowServiceApi#workflow_service_delete_workflow\nData: #{data.inspect}\nStatus code: #{status_code}\nHeaders: #{headers}"
  end
  [data, status_code, headers]
end
# @param namespace
# @param name
# @param [Hash] opts the optional parameters
# @option opts [String] :get_options_resource_version resourceVersion constraint on the read. Defaults to unset. +optional
# @option opts [String] :fields Fields to include or exclude in the response, e.g. "spec,status.phase", "-status.nodes".
# @option opts [String] :uid Optional UID to retrieve a specific workflow (useful for archived workflows sharing a name).
# @return [IoArgoprojWorkflowV1alpha1Workflow]
def workflow_service_get_workflow(namespace, name, opts = {})
  # Payload-only convenience wrapper around the full-response variant.
  workflow_service_get_workflow_with_http_info(namespace, name, opts).first
end

# Fetches a single workflow, returning payload plus HTTP status and headers.
#
# @param namespace [String] (required)
# @param name [String] (required)
# @param [Hash] opts the optional parameters
# @option opts [String] :get_options_resource_version resourceVersion constraint on the read. Defaults to unset. +optional
# @option opts [String] :fields Fields to include or exclude, e.g. "spec,status.phase", "-status.nodes".
# @option opts [String] :uid Optional UID to retrieve a specific workflow (useful for archived workflows sharing a name).
# @return [Array<(IoArgoprojWorkflowV1alpha1Workflow, Integer, Hash)>] payload, status code, headers
def workflow_service_get_workflow_with_http_info(namespace, name, opts = {})
  if @api_client.config.debugging
    @api_client.config.logger.debug 'Calling API: WorkflowServiceApi.workflow_service_get_workflow ...'
  end
  if @api_client.config.client_side_validation
    # Both path parameters are mandatory.
    fail ArgumentError, "Missing the required parameter 'namespace' when calling WorkflowServiceApi.workflow_service_get_workflow" if namespace.nil?
    fail ArgumentError, "Missing the required parameter 'name' when calling WorkflowServiceApi.workflow_service_get_workflow" if name.nil?
  end

  local_var_path = '/api/v1/workflows/{namespace}/{name}'
                   .sub('{namespace}', namespace.to_s)
                   .sub('{name}', name.to_s)

  query_params = opts[:query_params] || {}
  query_params[:'getOptions.resourceVersion'] = opts[:'get_options_resource_version'] unless opts[:'get_options_resource_version'].nil?
  query_params[:fields] = opts[:fields] unless opts[:fields].nil?
  query_params[:uid] = opts[:uid] unless opts[:uid].nil?

  header_params = opts[:header_params] || {}
  header_params['Accept'] = @api_client.select_header_accept(['application/json'])

  form_params = opts[:form_params] || {}
  post_body = opts[:body]
  return_type = opts[:return_type] || 'IoArgoprojWorkflowV1alpha1Workflow'
  auth_names = opts[:auth_names] || ['BearerToken']

  data, status_code, headers = @api_client.call_api(:GET, local_var_path,
                                                    header_params: header_params,
                                                    query_params: query_params,
                                                    form_params: form_params,
                                                    body: post_body,
                                                    auth_names: auth_names,
                                                    return_type: return_type)

  if @api_client.config.debugging
    @api_client.config.logger.debug "API called: WorkflowServiceApi#workflow_service_get_workflow\nData: #{data.inspect}\nStatus code: #{status_code}\nHeaders: #{headers}"
  end
  [data, status_code, headers]
end

# Lints a workflow manifest server-side without creating it.
#
# @param body (required) the workflow lint request payload
# @param namespace [String] (required)
# @param [Hash] opts the optional parameters
# @return [IoArgoprojWorkflowV1alpha1Workflow]
def workflow_service_lint_workflow(body, namespace, opts = {})
  workflow_service_lint_workflow_with_http_info(body, namespace, opts).first
end

# Lints a workflow manifest, returning payload plus HTTP status and headers.
#
# @param body (required) the workflow lint request payload
# @param namespace [String] (required)
# @param [Hash] opts the optional parameters
# @return [Array<(IoArgoprojWorkflowV1alpha1Workflow, Integer, Hash)>] payload, status code, headers
def workflow_service_lint_workflow_with_http_info(body, namespace, opts = {})
  if @api_client.config.debugging
    @api_client.config.logger.debug 'Calling API: WorkflowServiceApi.workflow_service_lint_workflow ...'
  end
  if @api_client.config.client_side_validation
    fail ArgumentError, "Missing the required parameter 'body' when calling WorkflowServiceApi.workflow_service_lint_workflow" if body.nil?
    fail ArgumentError, "Missing the required parameter 'namespace' when calling WorkflowServiceApi.workflow_service_lint_workflow" if namespace.nil?
  end

  local_var_path = '/api/v1/workflows/{namespace}/lint'.sub('{namespace}', namespace.to_s)

  query_params = opts[:query_params] || {}

  header_params = opts[:header_params] || {}
  header_params['Accept'] = @api_client.select_header_accept(['application/json'])
  # The lint request carries a JSON body.
  header_params['Content-Type'] = @api_client.select_header_content_type(['application/json'])

  form_params = opts[:form_params] || {}
  # An explicit opts[:body] overrides serialization of the required parameter.
  post_body = opts[:body] || @api_client.object_to_http_body(body)
  return_type = opts[:return_type] || 'IoArgoprojWorkflowV1alpha1Workflow'
  auth_names = opts[:auth_names] || ['BearerToken']

  data, status_code, headers = @api_client.call_api(:POST, local_var_path,
                                                    header_params: header_params,
                                                    query_params: query_params,
                                                    form_params: form_params,
                                                    body: post_body,
                                                    auth_names: auth_names,
                                                    return_type: return_type)

  if @api_client.config.debugging
    @api_client.config.logger.debug "API called: WorkflowServiceApi#workflow_service_lint_workflow\nData: #{data.inspect}\nStatus code: #{status_code}\nHeaders: #{headers}"
  end
  [data, status_code, headers]
end
# Lists workflows in a namespace.
# @param namespace
# @param [Hash] opts the optional parameters
# @option opts [String] :list_options_label_selector Label selector restricting the returned objects. Defaults to everything. +optional.
# @option opts [String] :list_options_field_selector Field selector restricting the returned objects. Defaults to everything. +optional.
# @option opts [BOOLEAN] :list_options_watch Watch for changes and stream add/update/remove notifications. +optional.
# @option opts [BOOLEAN] :list_options_allow_watch_bookmarks Request BOOKMARK watch events (servers may ignore). +optional.
# @option opts [String] :list_options_resource_version resourceVersion constraint on the list/watch. Defaults to unset. +optional
# @option opts [String] :list_options_resource_version_match How resourceVersion is applied to list calls. Defaults to unset. +optional
# @option opts [String] :list_options_timeout_seconds Timeout for the list/watch call regardless of activity. +optional.
# @option opts [String] :list_options_limit Maximum number of responses for a list call; pair with `continue` for paging. Not supported when watch is true.
# @option opts [String] :list_options_continue Continue token from a previous paged list call; a 410 ResourceExpired error means the token lapsed and the list must restart.
# @option opts [BOOLEAN] :list_options_send_initial_events With watch=true, stream synthetic initial events and a bookmark before the watch proper; requires resourceVersionMatch to be set. +optional
# @option opts [String] :fields Fields to include or exclude, e.g. "items.spec,items.status.phase", "-items.status.nodes".
# @option opts [String] :name_filter Name filter type: Exact | Contains | Prefix. Defaults to Exact.
# @option opts [String] :created_after
# @option opts [String] :finished_before
# @return [IoArgoprojWorkflowV1alpha1WorkflowList]
def workflow_service_list_workflows(namespace, opts = {})
  # Payload-only convenience wrapper around the full-response variant.
  workflow_service_list_workflows_with_http_info(namespace, opts).first
end

# Lists workflows, returning payload plus HTTP status and headers.
#
# @param namespace [String] (required)
# @param [Hash] opts the optional parameters: label/field selectors, watch
#   flags, resourceVersion constraints, paging via limit/continue, plus
#   fields, name_filter, created_after and finished_before filters
#   (same set documented on #workflow_service_list_workflows).
# @return [Array<(IoArgoprojWorkflowV1alpha1WorkflowList, Integer, Hash)>] payload, status code, headers
def workflow_service_list_workflows_with_http_info(namespace, opts = {})
  if @api_client.config.debugging
    @api_client.config.logger.debug 'Calling API: WorkflowServiceApi.workflow_service_list_workflows ...'
  end
  if @api_client.config.client_side_validation && namespace.nil?
    fail ArgumentError, "Missing the required parameter 'namespace' when calling WorkflowServiceApi.workflow_service_list_workflows"
  end

  local_var_path = '/api/v1/workflows/{namespace}'.sub('{namespace}', namespace.to_s)

  query_params = opts[:query_params] || {}
  # Wire-format query name => opts key, preserving the original emission order.
  {
    :'listOptions.labelSelector' => :'list_options_label_selector',
    :'listOptions.fieldSelector' => :'list_options_field_selector',
    :'listOptions.watch' => :'list_options_watch',
    :'listOptions.allowWatchBookmarks' => :'list_options_allow_watch_bookmarks',
    :'listOptions.resourceVersion' => :'list_options_resource_version',
    :'listOptions.resourceVersionMatch' => :'list_options_resource_version_match',
    :'listOptions.timeoutSeconds' => :'list_options_timeout_seconds',
    :'listOptions.limit' => :'list_options_limit',
    :'listOptions.continue' => :'list_options_continue',
    :'listOptions.sendInitialEvents' => :'list_options_send_initial_events',
    :fields => :fields,
    :nameFilter => :name_filter,
    :createdAfter => :created_after,
    :finishedBefore => :finished_before
  }.each do |wire_name, opt_key|
    query_params[wire_name] = opts[opt_key] unless opts[opt_key].nil?
  end

  header_params = opts[:header_params] || {}
  header_params['Accept'] = @api_client.select_header_accept(['application/json'])

  form_params = opts[:form_params] || {}
  post_body = opts[:body]
  return_type = opts[:return_type] || 'IoArgoprojWorkflowV1alpha1WorkflowList'
  auth_names = opts[:auth_names] || ['BearerToken']

  data, status_code, headers = @api_client.call_api(:GET, local_var_path,
                                                    header_params: header_params,
                                                    query_params: query_params,
                                                    form_params: form_params,
                                                    body: post_body,
                                                    auth_names: auth_names,
                                                    return_type: return_type)

  if @api_client.config.debugging
    @api_client.config.logger.debug "API called: WorkflowServiceApi#workflow_service_list_workflows\nData: #{data.inspect}\nStatus code: #{status_code}\nHeaders: #{headers}"
  end
  [data, status_code, headers]
end
# DEPRECATED: Cannot work via HTTP if podName is an empty string. Use WorkflowLogs.
# @param namespace
# @param name
# @param pod_name
# @param [Hash] opts the optional parameters
# @option opts [String] :log_options_container Container to stream logs from; defaults to the only container if the pod has one. +optional.
# @option opts [BOOLEAN] :log_options_follow Follow the log stream. Defaults to false. +optional.
# @option opts [BOOLEAN] :log_options_previous Return previous terminated container logs. Defaults to false. +optional.
# @option opts [String] :log_options_since_seconds Relative time in seconds before now from which to show logs; mutually exclusive with sinceTime. +optional.
# @option opts [String] :log_options_since_time_seconds Seconds of UTC time since the Unix epoch (0001-01-01T00:00:00Z..9999-12-31T23:59:59Z).
# @option opts [Integer] :log_options_since_time_nanos Non-negative nanosecond fraction of a second, 0..999,999,999.
# @option opts [BOOLEAN] :log_options_timestamps Prefix each log line with an RFC3339/RFC3339Nano timestamp. Defaults to false. +optional.
# @option opts [String] :log_options_tail_lines Number of lines from the end of the logs to show; with TailLines set, Stream may only be nil or "All". +optional.
# @option opts [String] :log_options_limit_bytes Byte limit to read from the server before terminating the output. +optional.
# @option opts [BOOLEAN] :log_options_insecure_skip_tls_verify_backend Skip TLS verification of the backend's serving certificate (connection between apiserver and backend becomes insecure). +optional.
# @option opts [String] :log_options_stream Which container log stream to return: "All", "Stdout" or "Stderr"; defaults to "All" interleaved. +optional.
# @option opts [String] :grep
# @option opts [String] :selector
# @return [StreamResultOfIoArgoprojWorkflowV1alpha1LogEntry]
def workflow_service_pod_logs(namespace, name, pod_name, opts = {})
  # Payload-only convenience wrapper around the full-response variant.
  workflow_service_pod_logs_with_http_info(namespace, name, pod_name, opts).first
end

# DEPRECATED: Cannot work via HTTP if podName is an empty string. Use WorkflowLogs.
# @param namespace
# @param name
# @param pod_name
# @param [Hash] opts the optional parameters (same log options as the wrapper above)
# @option opts [String] :log_options_container Container to stream logs from; defaults to the only container if the pod has one. +optional.
# @option opts [BOOLEAN] :log_options_follow Follow the log stream. Defaults to false. +optional.
# @option opts [BOOLEAN] :log_options_previous Return previous terminated container logs. Defaults to false. +optional.
# @option opts [String] :log_options_since_seconds Relative time in seconds before now from which to show logs; mutually exclusive with sinceTime. +optional.
# @option opts [String] :log_options_since_time_seconds Seconds of UTC time since the Unix epoch.
# @option opts [Integer] :log_options_since_time_nanos Non-negative nanosecond fraction of a second, 0..999,999,999.
Negative second values with fractions must still have non-negative nanos values that count forward in time. Must be from 0 to 999,999,999 inclusive. This field may be limited in precision depending on context. + # @option opts [BOOLEAN] :log_options_timestamps If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false. +optional. + # @option opts [String] :log_options_tail_lines If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime. Note that when \"TailLines\" is specified, \"Stream\" can only be set to nil or \"All\". +optional. + # @option opts [String] :log_options_limit_bytes If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit. +optional. + # @option opts [BOOLEAN] :log_options_insecure_skip_tls_verify_backend insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet). +optional. + # @option opts [String] :log_options_stream Specify which container log stream to return to the client. Acceptable values are \"All\", \"Stdout\" and \"Stderr\". If not specified, \"All\" is used, and both stdout and stderr are returned interleaved. Note that when \"TailLines\" is specified, \"Stream\" can only be set to nil or \"All\". 
+featureGate=PodLogsQuerySplitStreams +optional. + # @option opts [String] :grep + # @option opts [String] :selector + # @return [Array<(StreamResultOfIoArgoprojWorkflowV1alpha1LogEntry, Integer, Hash)>] StreamResultOfIoArgoprojWorkflowV1alpha1LogEntry data, response status code and response headers + def workflow_service_pod_logs_with_http_info(namespace, name, pod_name, opts = {}) + if @api_client.config.debugging + @api_client.config.logger.debug 'Calling API: WorkflowServiceApi.workflow_service_pod_logs ...' + end + # verify the required parameter 'namespace' is set + if @api_client.config.client_side_validation && namespace.nil? + fail ArgumentError, "Missing the required parameter 'namespace' when calling WorkflowServiceApi.workflow_service_pod_logs" + end + # verify the required parameter 'name' is set + if @api_client.config.client_side_validation && name.nil? + fail ArgumentError, "Missing the required parameter 'name' when calling WorkflowServiceApi.workflow_service_pod_logs" + end + # verify the required parameter 'pod_name' is set + if @api_client.config.client_side_validation && pod_name.nil? + fail ArgumentError, "Missing the required parameter 'pod_name' when calling WorkflowServiceApi.workflow_service_pod_logs" + end + # resource path + local_var_path = '/api/v1/workflows/{namespace}/{name}/{podName}/log'.sub('{' + 'namespace' + '}', namespace.to_s).sub('{' + 'name' + '}', name.to_s).sub('{' + 'podName' + '}', pod_name.to_s) + + # query parameters + query_params = opts[:query_params] || {} + query_params[:'logOptions.container'] = opts[:'log_options_container'] if !opts[:'log_options_container'].nil? + query_params[:'logOptions.follow'] = opts[:'log_options_follow'] if !opts[:'log_options_follow'].nil? + query_params[:'logOptions.previous'] = opts[:'log_options_previous'] if !opts[:'log_options_previous'].nil? + query_params[:'logOptions.sinceSeconds'] = opts[:'log_options_since_seconds'] if !opts[:'log_options_since_seconds'].nil? 
+ query_params[:'logOptions.sinceTime.seconds'] = opts[:'log_options_since_time_seconds'] if !opts[:'log_options_since_time_seconds'].nil? + query_params[:'logOptions.sinceTime.nanos'] = opts[:'log_options_since_time_nanos'] if !opts[:'log_options_since_time_nanos'].nil? + query_params[:'logOptions.timestamps'] = opts[:'log_options_timestamps'] if !opts[:'log_options_timestamps'].nil? + query_params[:'logOptions.tailLines'] = opts[:'log_options_tail_lines'] if !opts[:'log_options_tail_lines'].nil? + query_params[:'logOptions.limitBytes'] = opts[:'log_options_limit_bytes'] if !opts[:'log_options_limit_bytes'].nil? + query_params[:'logOptions.insecureSkipTLSVerifyBackend'] = opts[:'log_options_insecure_skip_tls_verify_backend'] if !opts[:'log_options_insecure_skip_tls_verify_backend'].nil? + query_params[:'logOptions.stream'] = opts[:'log_options_stream'] if !opts[:'log_options_stream'].nil? + query_params[:'grep'] = opts[:'grep'] if !opts[:'grep'].nil? + query_params[:'selector'] = opts[:'selector'] if !opts[:'selector'].nil? 
+ + # header parameters + header_params = opts[:header_params] || {} + # HTTP header 'Accept' (if needed) + header_params['Accept'] = @api_client.select_header_accept(['application/json']) + + # form parameters + form_params = opts[:form_params] || {} + + # http body (model) + post_body = opts[:body] + + return_type = opts[:return_type] || 'StreamResultOfIoArgoprojWorkflowV1alpha1LogEntry' + + auth_names = opts[:auth_names] || ['BearerToken'] + data, status_code, headers = @api_client.call_api(:GET, local_var_path, + :header_params => header_params, + :query_params => query_params, + :form_params => form_params, + :body => post_body, + :auth_names => auth_names, + :return_type => return_type) + + if @api_client.config.debugging + @api_client.config.logger.debug "API called: WorkflowServiceApi#workflow_service_pod_logs\nData: #{data.inspect}\nStatus code: #{status_code}\nHeaders: #{headers}" + end + return data, status_code, headers + end + # @param body + # @param namespace + # @param name + # @param [Hash] opts the optional parameters + # @return [IoArgoprojWorkflowV1alpha1Workflow] + def workflow_service_resubmit_workflow(body, namespace, name, opts = {}) + data, _status_code, _headers = workflow_service_resubmit_workflow_with_http_info(body, namespace, name, opts) + data + end + + # @param body + # @param namespace + # @param name + # @param [Hash] opts the optional parameters + # @return [Array<(IoArgoprojWorkflowV1alpha1Workflow, Integer, Hash)>] IoArgoprojWorkflowV1alpha1Workflow data, response status code and response headers + def workflow_service_resubmit_workflow_with_http_info(body, namespace, name, opts = {}) + if @api_client.config.debugging + @api_client.config.logger.debug 'Calling API: WorkflowServiceApi.workflow_service_resubmit_workflow ...' + end + # verify the required parameter 'body' is set + if @api_client.config.client_side_validation && body.nil? 
+ fail ArgumentError, "Missing the required parameter 'body' when calling WorkflowServiceApi.workflow_service_resubmit_workflow" + end + # verify the required parameter 'namespace' is set + if @api_client.config.client_side_validation && namespace.nil? + fail ArgumentError, "Missing the required parameter 'namespace' when calling WorkflowServiceApi.workflow_service_resubmit_workflow" + end + # verify the required parameter 'name' is set + if @api_client.config.client_side_validation && name.nil? + fail ArgumentError, "Missing the required parameter 'name' when calling WorkflowServiceApi.workflow_service_resubmit_workflow" + end + # resource path + local_var_path = '/api/v1/workflows/{namespace}/{name}/resubmit'.sub('{' + 'namespace' + '}', namespace.to_s).sub('{' + 'name' + '}', name.to_s) + + # query parameters + query_params = opts[:query_params] || {} + + # header parameters + header_params = opts[:header_params] || {} + # HTTP header 'Accept' (if needed) + header_params['Accept'] = @api_client.select_header_accept(['application/json']) + # HTTP header 'Content-Type' + header_params['Content-Type'] = @api_client.select_header_content_type(['application/json']) + + # form parameters + form_params = opts[:form_params] || {} + + # http body (model) + post_body = opts[:body] || @api_client.object_to_http_body(body) + + return_type = opts[:return_type] || 'IoArgoprojWorkflowV1alpha1Workflow' + + auth_names = opts[:auth_names] || ['BearerToken'] + data, status_code, headers = @api_client.call_api(:PUT, local_var_path, + :header_params => header_params, + :query_params => query_params, + :form_params => form_params, + :body => post_body, + :auth_names => auth_names, + :return_type => return_type) + + if @api_client.config.debugging + @api_client.config.logger.debug "API called: WorkflowServiceApi#workflow_service_resubmit_workflow\nData: #{data.inspect}\nStatus code: #{status_code}\nHeaders: #{headers}" + end + return data, status_code, headers + end + # @param body + 
# @param namespace + # @param name + # @param [Hash] opts the optional parameters + # @return [IoArgoprojWorkflowV1alpha1Workflow] + def workflow_service_resume_workflow(body, namespace, name, opts = {}) + data, _status_code, _headers = workflow_service_resume_workflow_with_http_info(body, namespace, name, opts) + data + end + + # @param body + # @param namespace + # @param name + # @param [Hash] opts the optional parameters + # @return [Array<(IoArgoprojWorkflowV1alpha1Workflow, Integer, Hash)>] IoArgoprojWorkflowV1alpha1Workflow data, response status code and response headers + def workflow_service_resume_workflow_with_http_info(body, namespace, name, opts = {}) + if @api_client.config.debugging + @api_client.config.logger.debug 'Calling API: WorkflowServiceApi.workflow_service_resume_workflow ...' + end + # verify the required parameter 'body' is set + if @api_client.config.client_side_validation && body.nil? + fail ArgumentError, "Missing the required parameter 'body' when calling WorkflowServiceApi.workflow_service_resume_workflow" + end + # verify the required parameter 'namespace' is set + if @api_client.config.client_side_validation && namespace.nil? + fail ArgumentError, "Missing the required parameter 'namespace' when calling WorkflowServiceApi.workflow_service_resume_workflow" + end + # verify the required parameter 'name' is set + if @api_client.config.client_side_validation && name.nil? 
+ fail ArgumentError, "Missing the required parameter 'name' when calling WorkflowServiceApi.workflow_service_resume_workflow" + end + # resource path + local_var_path = '/api/v1/workflows/{namespace}/{name}/resume'.sub('{' + 'namespace' + '}', namespace.to_s).sub('{' + 'name' + '}', name.to_s) + + # query parameters + query_params = opts[:query_params] || {} + + # header parameters + header_params = opts[:header_params] || {} + # HTTP header 'Accept' (if needed) + header_params['Accept'] = @api_client.select_header_accept(['application/json']) + # HTTP header 'Content-Type' + header_params['Content-Type'] = @api_client.select_header_content_type(['application/json']) + + # form parameters + form_params = opts[:form_params] || {} + + # http body (model) + post_body = opts[:body] || @api_client.object_to_http_body(body) + + return_type = opts[:return_type] || 'IoArgoprojWorkflowV1alpha1Workflow' + + auth_names = opts[:auth_names] || ['BearerToken'] + data, status_code, headers = @api_client.call_api(:PUT, local_var_path, + :header_params => header_params, + :query_params => query_params, + :form_params => form_params, + :body => post_body, + :auth_names => auth_names, + :return_type => return_type) + + if @api_client.config.debugging + @api_client.config.logger.debug "API called: WorkflowServiceApi#workflow_service_resume_workflow\nData: #{data.inspect}\nStatus code: #{status_code}\nHeaders: #{headers}" + end + return data, status_code, headers + end + # @param body + # @param namespace + # @param name + # @param [Hash] opts the optional parameters + # @return [IoArgoprojWorkflowV1alpha1Workflow] + def workflow_service_retry_workflow(body, namespace, name, opts = {}) + data, _status_code, _headers = workflow_service_retry_workflow_with_http_info(body, namespace, name, opts) + data + end + + # @param body + # @param namespace + # @param name + # @param [Hash] opts the optional parameters + # @return [Array<(IoArgoprojWorkflowV1alpha1Workflow, Integer, Hash)>] 
IoArgoprojWorkflowV1alpha1Workflow data, response status code and response headers + def workflow_service_retry_workflow_with_http_info(body, namespace, name, opts = {}) + if @api_client.config.debugging + @api_client.config.logger.debug 'Calling API: WorkflowServiceApi.workflow_service_retry_workflow ...' + end + # verify the required parameter 'body' is set + if @api_client.config.client_side_validation && body.nil? + fail ArgumentError, "Missing the required parameter 'body' when calling WorkflowServiceApi.workflow_service_retry_workflow" + end + # verify the required parameter 'namespace' is set + if @api_client.config.client_side_validation && namespace.nil? + fail ArgumentError, "Missing the required parameter 'namespace' when calling WorkflowServiceApi.workflow_service_retry_workflow" + end + # verify the required parameter 'name' is set + if @api_client.config.client_side_validation && name.nil? + fail ArgumentError, "Missing the required parameter 'name' when calling WorkflowServiceApi.workflow_service_retry_workflow" + end + # resource path + local_var_path = '/api/v1/workflows/{namespace}/{name}/retry'.sub('{' + 'namespace' + '}', namespace.to_s).sub('{' + 'name' + '}', name.to_s) + + # query parameters + query_params = opts[:query_params] || {} + + # header parameters + header_params = opts[:header_params] || {} + # HTTP header 'Accept' (if needed) + header_params['Accept'] = @api_client.select_header_accept(['application/json']) + # HTTP header 'Content-Type' + header_params['Content-Type'] = @api_client.select_header_content_type(['application/json']) + + # form parameters + form_params = opts[:form_params] || {} + + # http body (model) + post_body = opts[:body] || @api_client.object_to_http_body(body) + + return_type = opts[:return_type] || 'IoArgoprojWorkflowV1alpha1Workflow' + + auth_names = opts[:auth_names] || ['BearerToken'] + data, status_code, headers = @api_client.call_api(:PUT, local_var_path, + :header_params => header_params, + 
:query_params => query_params, + :form_params => form_params, + :body => post_body, + :auth_names => auth_names, + :return_type => return_type) + + if @api_client.config.debugging + @api_client.config.logger.debug "API called: WorkflowServiceApi#workflow_service_retry_workflow\nData: #{data.inspect}\nStatus code: #{status_code}\nHeaders: #{headers}" + end + return data, status_code, headers + end + # @param body + # @param namespace + # @param name + # @param [Hash] opts the optional parameters + # @return [IoArgoprojWorkflowV1alpha1Workflow] + def workflow_service_set_workflow(body, namespace, name, opts = {}) + data, _status_code, _headers = workflow_service_set_workflow_with_http_info(body, namespace, name, opts) + data + end + + # @param body + # @param namespace + # @param name + # @param [Hash] opts the optional parameters + # @return [Array<(IoArgoprojWorkflowV1alpha1Workflow, Integer, Hash)>] IoArgoprojWorkflowV1alpha1Workflow data, response status code and response headers + def workflow_service_set_workflow_with_http_info(body, namespace, name, opts = {}) + if @api_client.config.debugging + @api_client.config.logger.debug 'Calling API: WorkflowServiceApi.workflow_service_set_workflow ...' + end + # verify the required parameter 'body' is set + if @api_client.config.client_side_validation && body.nil? + fail ArgumentError, "Missing the required parameter 'body' when calling WorkflowServiceApi.workflow_service_set_workflow" + end + # verify the required parameter 'namespace' is set + if @api_client.config.client_side_validation && namespace.nil? + fail ArgumentError, "Missing the required parameter 'namespace' when calling WorkflowServiceApi.workflow_service_set_workflow" + end + # verify the required parameter 'name' is set + if @api_client.config.client_side_validation && name.nil? 
+ fail ArgumentError, "Missing the required parameter 'name' when calling WorkflowServiceApi.workflow_service_set_workflow" + end + # resource path + local_var_path = '/api/v1/workflows/{namespace}/{name}/set'.sub('{' + 'namespace' + '}', namespace.to_s).sub('{' + 'name' + '}', name.to_s) + + # query parameters + query_params = opts[:query_params] || {} + + # header parameters + header_params = opts[:header_params] || {} + # HTTP header 'Accept' (if needed) + header_params['Accept'] = @api_client.select_header_accept(['application/json']) + # HTTP header 'Content-Type' + header_params['Content-Type'] = @api_client.select_header_content_type(['application/json']) + + # form parameters + form_params = opts[:form_params] || {} + + # http body (model) + post_body = opts[:body] || @api_client.object_to_http_body(body) + + return_type = opts[:return_type] || 'IoArgoprojWorkflowV1alpha1Workflow' + + auth_names = opts[:auth_names] || ['BearerToken'] + data, status_code, headers = @api_client.call_api(:PUT, local_var_path, + :header_params => header_params, + :query_params => query_params, + :form_params => form_params, + :body => post_body, + :auth_names => auth_names, + :return_type => return_type) + + if @api_client.config.debugging + @api_client.config.logger.debug "API called: WorkflowServiceApi#workflow_service_set_workflow\nData: #{data.inspect}\nStatus code: #{status_code}\nHeaders: #{headers}" + end + return data, status_code, headers + end + # @param body + # @param namespace + # @param name + # @param [Hash] opts the optional parameters + # @return [IoArgoprojWorkflowV1alpha1Workflow] + def workflow_service_stop_workflow(body, namespace, name, opts = {}) + data, _status_code, _headers = workflow_service_stop_workflow_with_http_info(body, namespace, name, opts) + data + end + + # @param body + # @param namespace + # @param name + # @param [Hash] opts the optional parameters + # @return [Array<(IoArgoprojWorkflowV1alpha1Workflow, Integer, Hash)>] 
IoArgoprojWorkflowV1alpha1Workflow data, response status code and response headers + def workflow_service_stop_workflow_with_http_info(body, namespace, name, opts = {}) + if @api_client.config.debugging + @api_client.config.logger.debug 'Calling API: WorkflowServiceApi.workflow_service_stop_workflow ...' + end + # verify the required parameter 'body' is set + if @api_client.config.client_side_validation && body.nil? + fail ArgumentError, "Missing the required parameter 'body' when calling WorkflowServiceApi.workflow_service_stop_workflow" + end + # verify the required parameter 'namespace' is set + if @api_client.config.client_side_validation && namespace.nil? + fail ArgumentError, "Missing the required parameter 'namespace' when calling WorkflowServiceApi.workflow_service_stop_workflow" + end + # verify the required parameter 'name' is set + if @api_client.config.client_side_validation && name.nil? + fail ArgumentError, "Missing the required parameter 'name' when calling WorkflowServiceApi.workflow_service_stop_workflow" + end + # resource path + local_var_path = '/api/v1/workflows/{namespace}/{name}/stop'.sub('{' + 'namespace' + '}', namespace.to_s).sub('{' + 'name' + '}', name.to_s) + + # query parameters + query_params = opts[:query_params] || {} + + # header parameters + header_params = opts[:header_params] || {} + # HTTP header 'Accept' (if needed) + header_params['Accept'] = @api_client.select_header_accept(['application/json']) + # HTTP header 'Content-Type' + header_params['Content-Type'] = @api_client.select_header_content_type(['application/json']) + + # form parameters + form_params = opts[:form_params] || {} + + # http body (model) + post_body = opts[:body] || @api_client.object_to_http_body(body) + + return_type = opts[:return_type] || 'IoArgoprojWorkflowV1alpha1Workflow' + + auth_names = opts[:auth_names] || ['BearerToken'] + data, status_code, headers = @api_client.call_api(:PUT, local_var_path, + :header_params => header_params, + :query_params => 
query_params, + :form_params => form_params, + :body => post_body, + :auth_names => auth_names, + :return_type => return_type) + + if @api_client.config.debugging + @api_client.config.logger.debug "API called: WorkflowServiceApi#workflow_service_stop_workflow\nData: #{data.inspect}\nStatus code: #{status_code}\nHeaders: #{headers}" + end + return data, status_code, headers + end + # @param body + # @param namespace + # @param [Hash] opts the optional parameters + # @return [IoArgoprojWorkflowV1alpha1Workflow] + def workflow_service_submit_workflow(body, namespace, opts = {}) + data, _status_code, _headers = workflow_service_submit_workflow_with_http_info(body, namespace, opts) + data + end + + # @param body + # @param namespace + # @param [Hash] opts the optional parameters + # @return [Array<(IoArgoprojWorkflowV1alpha1Workflow, Integer, Hash)>] IoArgoprojWorkflowV1alpha1Workflow data, response status code and response headers + def workflow_service_submit_workflow_with_http_info(body, namespace, opts = {}) + if @api_client.config.debugging + @api_client.config.logger.debug 'Calling API: WorkflowServiceApi.workflow_service_submit_workflow ...' + end + # verify the required parameter 'body' is set + if @api_client.config.client_side_validation && body.nil? + fail ArgumentError, "Missing the required parameter 'body' when calling WorkflowServiceApi.workflow_service_submit_workflow" + end + # verify the required parameter 'namespace' is set + if @api_client.config.client_side_validation && namespace.nil? 
+ fail ArgumentError, "Missing the required parameter 'namespace' when calling WorkflowServiceApi.workflow_service_submit_workflow" + end + # resource path + local_var_path = '/api/v1/workflows/{namespace}/submit'.sub('{' + 'namespace' + '}', namespace.to_s) + + # query parameters + query_params = opts[:query_params] || {} + + # header parameters + header_params = opts[:header_params] || {} + # HTTP header 'Accept' (if needed) + header_params['Accept'] = @api_client.select_header_accept(['application/json']) + # HTTP header 'Content-Type' + header_params['Content-Type'] = @api_client.select_header_content_type(['application/json']) + + # form parameters + form_params = opts[:form_params] || {} + + # http body (model) + post_body = opts[:body] || @api_client.object_to_http_body(body) + + return_type = opts[:return_type] || 'IoArgoprojWorkflowV1alpha1Workflow' + + auth_names = opts[:auth_names] || ['BearerToken'] + data, status_code, headers = @api_client.call_api(:POST, local_var_path, + :header_params => header_params, + :query_params => query_params, + :form_params => form_params, + :body => post_body, + :auth_names => auth_names, + :return_type => return_type) + + if @api_client.config.debugging + @api_client.config.logger.debug "API called: WorkflowServiceApi#workflow_service_submit_workflow\nData: #{data.inspect}\nStatus code: #{status_code}\nHeaders: #{headers}" + end + return data, status_code, headers + end + # @param body + # @param namespace + # @param name + # @param [Hash] opts the optional parameters + # @return [IoArgoprojWorkflowV1alpha1Workflow] + def workflow_service_suspend_workflow(body, namespace, name, opts = {}) + data, _status_code, _headers = workflow_service_suspend_workflow_with_http_info(body, namespace, name, opts) + data + end + + # @param body + # @param namespace + # @param name + # @param [Hash] opts the optional parameters + # @return [Array<(IoArgoprojWorkflowV1alpha1Workflow, Integer, Hash)>] IoArgoprojWorkflowV1alpha1Workflow 
data, response status code and response headers + def workflow_service_suspend_workflow_with_http_info(body, namespace, name, opts = {}) + if @api_client.config.debugging + @api_client.config.logger.debug 'Calling API: WorkflowServiceApi.workflow_service_suspend_workflow ...' + end + # verify the required parameter 'body' is set + if @api_client.config.client_side_validation && body.nil? + fail ArgumentError, "Missing the required parameter 'body' when calling WorkflowServiceApi.workflow_service_suspend_workflow" + end + # verify the required parameter 'namespace' is set + if @api_client.config.client_side_validation && namespace.nil? + fail ArgumentError, "Missing the required parameter 'namespace' when calling WorkflowServiceApi.workflow_service_suspend_workflow" + end + # verify the required parameter 'name' is set + if @api_client.config.client_side_validation && name.nil? + fail ArgumentError, "Missing the required parameter 'name' when calling WorkflowServiceApi.workflow_service_suspend_workflow" + end + # resource path + local_var_path = '/api/v1/workflows/{namespace}/{name}/suspend'.sub('{' + 'namespace' + '}', namespace.to_s).sub('{' + 'name' + '}', name.to_s) + + # query parameters + query_params = opts[:query_params] || {} + + # header parameters + header_params = opts[:header_params] || {} + # HTTP header 'Accept' (if needed) + header_params['Accept'] = @api_client.select_header_accept(['application/json']) + # HTTP header 'Content-Type' + header_params['Content-Type'] = @api_client.select_header_content_type(['application/json']) + + # form parameters + form_params = opts[:form_params] || {} + + # http body (model) + post_body = opts[:body] || @api_client.object_to_http_body(body) + + return_type = opts[:return_type] || 'IoArgoprojWorkflowV1alpha1Workflow' + + auth_names = opts[:auth_names] || ['BearerToken'] + data, status_code, headers = @api_client.call_api(:PUT, local_var_path, + :header_params => header_params, + :query_params => query_params, + 
:form_params => form_params, + :body => post_body, + :auth_names => auth_names, + :return_type => return_type) + + if @api_client.config.debugging + @api_client.config.logger.debug "API called: WorkflowServiceApi#workflow_service_suspend_workflow\nData: #{data.inspect}\nStatus code: #{status_code}\nHeaders: #{headers}" + end + return data, status_code, headers + end + # @param body + # @param namespace + # @param name + # @param [Hash] opts the optional parameters + # @return [IoArgoprojWorkflowV1alpha1Workflow] + def workflow_service_terminate_workflow(body, namespace, name, opts = {}) + data, _status_code, _headers = workflow_service_terminate_workflow_with_http_info(body, namespace, name, opts) + data + end + + # @param body + # @param namespace + # @param name + # @param [Hash] opts the optional parameters + # @return [Array<(IoArgoprojWorkflowV1alpha1Workflow, Integer, Hash)>] IoArgoprojWorkflowV1alpha1Workflow data, response status code and response headers + def workflow_service_terminate_workflow_with_http_info(body, namespace, name, opts = {}) + if @api_client.config.debugging + @api_client.config.logger.debug 'Calling API: WorkflowServiceApi.workflow_service_terminate_workflow ...' + end + # verify the required parameter 'body' is set + if @api_client.config.client_side_validation && body.nil? + fail ArgumentError, "Missing the required parameter 'body' when calling WorkflowServiceApi.workflow_service_terminate_workflow" + end + # verify the required parameter 'namespace' is set + if @api_client.config.client_side_validation && namespace.nil? + fail ArgumentError, "Missing the required parameter 'namespace' when calling WorkflowServiceApi.workflow_service_terminate_workflow" + end + # verify the required parameter 'name' is set + if @api_client.config.client_side_validation && name.nil? 
+ fail ArgumentError, "Missing the required parameter 'name' when calling WorkflowServiceApi.workflow_service_terminate_workflow" + end + # resource path + local_var_path = '/api/v1/workflows/{namespace}/{name}/terminate'.sub('{' + 'namespace' + '}', namespace.to_s).sub('{' + 'name' + '}', name.to_s) + + # query parameters + query_params = opts[:query_params] || {} + + # header parameters + header_params = opts[:header_params] || {} + # HTTP header 'Accept' (if needed) + header_params['Accept'] = @api_client.select_header_accept(['application/json']) + # HTTP header 'Content-Type' + header_params['Content-Type'] = @api_client.select_header_content_type(['application/json']) + + # form parameters + form_params = opts[:form_params] || {} + + # http body (model) + post_body = opts[:body] || @api_client.object_to_http_body(body) + + return_type = opts[:return_type] || 'IoArgoprojWorkflowV1alpha1Workflow' + + auth_names = opts[:auth_names] || ['BearerToken'] + data, status_code, headers = @api_client.call_api(:PUT, local_var_path, + :header_params => header_params, + :query_params => query_params, + :form_params => form_params, + :body => post_body, + :auth_names => auth_names, + :return_type => return_type) + + if @api_client.config.debugging + @api_client.config.logger.debug "API called: WorkflowServiceApi#workflow_service_terminate_workflow\nData: #{data.inspect}\nStatus code: #{status_code}\nHeaders: #{headers}" + end + return data, status_code, headers + end + # @param namespace + # @param [Hash] opts the optional parameters + # @option opts [String] :list_options_label_selector A selector to restrict the list of returned objects by their labels. Defaults to everything. +optional. + # @option opts [String] :list_options_field_selector A selector to restrict the list of returned objects by their fields. Defaults to everything. +optional. 
+ # @option opts [BOOLEAN] :list_options_watch Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. +optional. + # @option opts [BOOLEAN] :list_options_allow_watch_bookmarks allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. +optional. + # @option opts [String] :list_options_resource_version resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset +optional + # @option opts [String] :list_options_resource_version_match resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset +optional + # @option opts [String] :list_options_timeout_seconds Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. + # @option opts [String] :list_options_limit limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. + # @option opts [String] :list_options_continue The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. + # @option opts [BOOLEAN] :list_options_send_initial_events `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. 
Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional + # @return [StreamResultOfIoK8sApiCoreV1Event] + def workflow_service_watch_events(namespace, opts = {}) + data, _status_code, _headers = workflow_service_watch_events_with_http_info(namespace, opts) + data + end + + # @param namespace + # @param [Hash] opts the optional parameters + # @option opts [String] :list_options_label_selector A selector to restrict the list of returned objects by their labels. Defaults to everything. +optional. + # @option opts [String] :list_options_field_selector A selector to restrict the list of returned objects by their fields. Defaults to everything. +optional. + # @option opts [BOOLEAN] :list_options_watch Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. +optional. + # @option opts [BOOLEAN] :list_options_allow_watch_bookmarks allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. +optional. + # @option opts [String] :list_options_resource_version resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset +optional + # @option opts [String] :list_options_resource_version_match resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset +optional + # @option opts [String] :list_options_timeout_seconds Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. + # @option opts [String] :list_options_limit limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. + # @option opts [String] :list_options_continue The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. + # @option opts [BOOLEAN] :list_options_send_initial_events `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. 
The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional + # @return [Array<(StreamResultOfIoK8sApiCoreV1Event, Integer, Hash)>] StreamResultOfIoK8sApiCoreV1Event data, response status code and response headers + def workflow_service_watch_events_with_http_info(namespace, opts = {}) + if @api_client.config.debugging + @api_client.config.logger.debug 'Calling API: WorkflowServiceApi.workflow_service_watch_events ...' + end + # verify the required parameter 'namespace' is set + if @api_client.config.client_side_validation && namespace.nil? + fail ArgumentError, "Missing the required parameter 'namespace' when calling WorkflowServiceApi.workflow_service_watch_events" + end + # resource path + local_var_path = '/api/v1/stream/events/{namespace}'.sub('{' + 'namespace' + '}', namespace.to_s) + + # query parameters + query_params = opts[:query_params] || {} + query_params[:'listOptions.labelSelector'] = opts[:'list_options_label_selector'] if !opts[:'list_options_label_selector'].nil? + query_params[:'listOptions.fieldSelector'] = opts[:'list_options_field_selector'] if !opts[:'list_options_field_selector'].nil? + query_params[:'listOptions.watch'] = opts[:'list_options_watch'] if !opts[:'list_options_watch'].nil? 
+ query_params[:'listOptions.allowWatchBookmarks'] = opts[:'list_options_allow_watch_bookmarks'] if !opts[:'list_options_allow_watch_bookmarks'].nil? + query_params[:'listOptions.resourceVersion'] = opts[:'list_options_resource_version'] if !opts[:'list_options_resource_version'].nil? + query_params[:'listOptions.resourceVersionMatch'] = opts[:'list_options_resource_version_match'] if !opts[:'list_options_resource_version_match'].nil? + query_params[:'listOptions.timeoutSeconds'] = opts[:'list_options_timeout_seconds'] if !opts[:'list_options_timeout_seconds'].nil? + query_params[:'listOptions.limit'] = opts[:'list_options_limit'] if !opts[:'list_options_limit'].nil? + query_params[:'listOptions.continue'] = opts[:'list_options_continue'] if !opts[:'list_options_continue'].nil? + query_params[:'listOptions.sendInitialEvents'] = opts[:'list_options_send_initial_events'] if !opts[:'list_options_send_initial_events'].nil? + + # header parameters + header_params = opts[:header_params] || {} + # HTTP header 'Accept' (if needed) + header_params['Accept'] = @api_client.select_header_accept(['application/json']) + + # form parameters + form_params = opts[:form_params] || {} + + # http body (model) + post_body = opts[:body] + + return_type = opts[:return_type] || 'StreamResultOfIoK8sApiCoreV1Event' + + auth_names = opts[:auth_names] || ['BearerToken'] + data, status_code, headers = @api_client.call_api(:GET, local_var_path, + :header_params => header_params, + :query_params => query_params, + :form_params => form_params, + :body => post_body, + :auth_names => auth_names, + :return_type => return_type) + + if @api_client.config.debugging + @api_client.config.logger.debug "API called: WorkflowServiceApi#workflow_service_watch_events\nData: #{data.inspect}\nStatus code: #{status_code}\nHeaders: #{headers}" + end + return data, status_code, headers + end + # @param namespace + # @param [Hash] opts the optional parameters + # @option opts [String] :list_options_label_selector A 
selector to restrict the list of returned objects by their labels. Defaults to everything. +optional. + # @option opts [String] :list_options_field_selector A selector to restrict the list of returned objects by their fields. Defaults to everything. +optional. + # @option opts [BOOLEAN] :list_options_watch Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. +optional. + # @option opts [BOOLEAN] :list_options_allow_watch_bookmarks allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. +optional. + # @option opts [String] :list_options_resource_version resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset +optional + # @option opts [String] :list_options_resource_version_match resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset +optional + # @option opts [String] :list_options_timeout_seconds Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. + # @option opts [String] :list_options_limit limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. + # @option opts [String] :list_options_continue The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. 
If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. + # @option opts [BOOLEAN] :list_options_send_initial_events `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. 
Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional + # @option opts [String] :fields + # @return [StreamResultOfIoArgoprojWorkflowV1alpha1WorkflowWatchEvent] + def workflow_service_watch_workflows(namespace, opts = {}) + data, _status_code, _headers = workflow_service_watch_workflows_with_http_info(namespace, opts) + data + end + + # @param namespace + # @param [Hash] opts the optional parameters + # @option opts [String] :list_options_label_selector A selector to restrict the list of returned objects by their labels. Defaults to everything. +optional. + # @option opts [String] :list_options_field_selector A selector to restrict the list of returned objects by their fields. Defaults to everything. +optional. + # @option opts [BOOLEAN] :list_options_watch Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. +optional. + # @option opts [BOOLEAN] :list_options_allow_watch_bookmarks allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. +optional. + # @option opts [String] :list_options_resource_version resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset +optional + # @option opts [String] :list_options_resource_version_match resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset +optional + # @option opts [String] :list_options_timeout_seconds Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. + # @option opts [String] :list_options_limit limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. + # @option opts [String] :list_options_continue The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. + # @option opts [BOOLEAN] :list_options_send_initial_events `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. 
The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional + # @option opts [String] :fields + # @return [Array<(StreamResultOfIoArgoprojWorkflowV1alpha1WorkflowWatchEvent, Integer, Hash)>] StreamResultOfIoArgoprojWorkflowV1alpha1WorkflowWatchEvent data, response status code and response headers + def workflow_service_watch_workflows_with_http_info(namespace, opts = {}) + if @api_client.config.debugging + @api_client.config.logger.debug 'Calling API: WorkflowServiceApi.workflow_service_watch_workflows ...' + end + # verify the required parameter 'namespace' is set + if @api_client.config.client_side_validation && namespace.nil? + fail ArgumentError, "Missing the required parameter 'namespace' when calling WorkflowServiceApi.workflow_service_watch_workflows" + end + # resource path + local_var_path = '/api/v1/workflow-events/{namespace}'.sub('{' + 'namespace' + '}', namespace.to_s) + + # query parameters + query_params = opts[:query_params] || {} + query_params[:'listOptions.labelSelector'] = opts[:'list_options_label_selector'] if !opts[:'list_options_label_selector'].nil? + query_params[:'listOptions.fieldSelector'] = opts[:'list_options_field_selector'] if !opts[:'list_options_field_selector'].nil? + query_params[:'listOptions.watch'] = opts[:'list_options_watch'] if !opts[:'list_options_watch'].nil? 
+ query_params[:'listOptions.allowWatchBookmarks'] = opts[:'list_options_allow_watch_bookmarks'] if !opts[:'list_options_allow_watch_bookmarks'].nil? + query_params[:'listOptions.resourceVersion'] = opts[:'list_options_resource_version'] if !opts[:'list_options_resource_version'].nil? + query_params[:'listOptions.resourceVersionMatch'] = opts[:'list_options_resource_version_match'] if !opts[:'list_options_resource_version_match'].nil? + query_params[:'listOptions.timeoutSeconds'] = opts[:'list_options_timeout_seconds'] if !opts[:'list_options_timeout_seconds'].nil? + query_params[:'listOptions.limit'] = opts[:'list_options_limit'] if !opts[:'list_options_limit'].nil? + query_params[:'listOptions.continue'] = opts[:'list_options_continue'] if !opts[:'list_options_continue'].nil? + query_params[:'listOptions.sendInitialEvents'] = opts[:'list_options_send_initial_events'] if !opts[:'list_options_send_initial_events'].nil? + query_params[:'fields'] = opts[:'fields'] if !opts[:'fields'].nil? 
+ + # header parameters + header_params = opts[:header_params] || {} + # HTTP header 'Accept' (if needed) + header_params['Accept'] = @api_client.select_header_accept(['application/json']) + + # form parameters + form_params = opts[:form_params] || {} + + # http body (model) + post_body = opts[:body] + + return_type = opts[:return_type] || 'StreamResultOfIoArgoprojWorkflowV1alpha1WorkflowWatchEvent' + + auth_names = opts[:auth_names] || ['BearerToken'] + data, status_code, headers = @api_client.call_api(:GET, local_var_path, + :header_params => header_params, + :query_params => query_params, + :form_params => form_params, + :body => post_body, + :auth_names => auth_names, + :return_type => return_type) + + if @api_client.config.debugging + @api_client.config.logger.debug "API called: WorkflowServiceApi#workflow_service_watch_workflows\nData: #{data.inspect}\nStatus code: #{status_code}\nHeaders: #{headers}" + end + return data, status_code, headers + end + # @param namespace + # @param name + # @param [Hash] opts the optional parameters + # @option opts [String] :pod_name + # @option opts [String] :log_options_container The container for which to stream logs. Defaults to only container if there is one container in the pod. +optional. + # @option opts [BOOLEAN] :log_options_follow Follow the log stream of the pod. Defaults to false. +optional. + # @option opts [BOOLEAN] :log_options_previous Return previous terminated container logs. Defaults to false. +optional. + # @option opts [String] :log_options_since_seconds A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified. +optional. + # @option opts [String] :log_options_since_time_seconds Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z. 
Must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive. + # @option opts [Integer] :log_options_since_time_nanos Non-negative fractions of a second at nanosecond resolution. Negative second values with fractions must still have non-negative nanos values that count forward in time. Must be from 0 to 999,999,999 inclusive. This field may be limited in precision depending on context. + # @option opts [BOOLEAN] :log_options_timestamps If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false. +optional. + # @option opts [String] :log_options_tail_lines If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime. Note that when \"TailLines\" is specified, \"Stream\" can only be set to nil or \"All\". +optional. + # @option opts [String] :log_options_limit_bytes If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit. +optional. + # @option opts [BOOLEAN] :log_options_insecure_skip_tls_verify_backend insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet). +optional. + # @option opts [String] :log_options_stream Specify which container log stream to return to the client. Acceptable values are \"All\", \"Stdout\" and \"Stderr\". 
If not specified, \"All\" is used, and both stdout and stderr are returned interleaved. Note that when \"TailLines\" is specified, \"Stream\" can only be set to nil or \"All\". +featureGate=PodLogsQuerySplitStreams +optional. + # @option opts [String] :grep + # @option opts [String] :selector + # @return [StreamResultOfIoArgoprojWorkflowV1alpha1LogEntry] + def workflow_service_workflow_logs(namespace, name, opts = {}) + data, _status_code, _headers = workflow_service_workflow_logs_with_http_info(namespace, name, opts) + data + end + + # @param namespace + # @param name + # @param [Hash] opts the optional parameters + # @option opts [String] :pod_name + # @option opts [String] :log_options_container The container for which to stream logs. Defaults to only container if there is one container in the pod. +optional. + # @option opts [BOOLEAN] :log_options_follow Follow the log stream of the pod. Defaults to false. +optional. + # @option opts [BOOLEAN] :log_options_previous Return previous terminated container logs. Defaults to false. +optional. + # @option opts [String] :log_options_since_seconds A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified. +optional. + # @option opts [String] :log_options_since_time_seconds Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive. + # @option opts [Integer] :log_options_since_time_nanos Non-negative fractions of a second at nanosecond resolution. Negative second values with fractions must still have non-negative nanos values that count forward in time. Must be from 0 to 999,999,999 inclusive. This field may be limited in precision depending on context. 
+ # @option opts [BOOLEAN] :log_options_timestamps If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false. +optional. + # @option opts [String] :log_options_tail_lines If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime. Note that when \"TailLines\" is specified, \"Stream\" can only be set to nil or \"All\". +optional. + # @option opts [String] :log_options_limit_bytes If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit. +optional. + # @option opts [BOOLEAN] :log_options_insecure_skip_tls_verify_backend insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet). +optional. + # @option opts [String] :log_options_stream Specify which container log stream to return to the client. Acceptable values are \"All\", \"Stdout\" and \"Stderr\". If not specified, \"All\" is used, and both stdout and stderr are returned interleaved. Note that when \"TailLines\" is specified, \"Stream\" can only be set to nil or \"All\". +featureGate=PodLogsQuerySplitStreams +optional. 
+ # @option opts [String] :grep + # @option opts [String] :selector + # @return [Array<(StreamResultOfIoArgoprojWorkflowV1alpha1LogEntry, Integer, Hash)>] StreamResultOfIoArgoprojWorkflowV1alpha1LogEntry data, response status code and response headers + def workflow_service_workflow_logs_with_http_info(namespace, name, opts = {}) + if @api_client.config.debugging + @api_client.config.logger.debug 'Calling API: WorkflowServiceApi.workflow_service_workflow_logs ...' + end + # verify the required parameter 'namespace' is set + if @api_client.config.client_side_validation && namespace.nil? + fail ArgumentError, "Missing the required parameter 'namespace' when calling WorkflowServiceApi.workflow_service_workflow_logs" + end + # verify the required parameter 'name' is set + if @api_client.config.client_side_validation && name.nil? + fail ArgumentError, "Missing the required parameter 'name' when calling WorkflowServiceApi.workflow_service_workflow_logs" + end + # resource path + local_var_path = '/api/v1/workflows/{namespace}/{name}/log'.sub('{' + 'namespace' + '}', namespace.to_s).sub('{' + 'name' + '}', name.to_s) + + # query parameters + query_params = opts[:query_params] || {} + query_params[:'podName'] = opts[:'pod_name'] if !opts[:'pod_name'].nil? + query_params[:'logOptions.container'] = opts[:'log_options_container'] if !opts[:'log_options_container'].nil? + query_params[:'logOptions.follow'] = opts[:'log_options_follow'] if !opts[:'log_options_follow'].nil? + query_params[:'logOptions.previous'] = opts[:'log_options_previous'] if !opts[:'log_options_previous'].nil? + query_params[:'logOptions.sinceSeconds'] = opts[:'log_options_since_seconds'] if !opts[:'log_options_since_seconds'].nil? + query_params[:'logOptions.sinceTime.seconds'] = opts[:'log_options_since_time_seconds'] if !opts[:'log_options_since_time_seconds'].nil? + query_params[:'logOptions.sinceTime.nanos'] = opts[:'log_options_since_time_nanos'] if !opts[:'log_options_since_time_nanos'].nil? 
+ query_params[:'logOptions.timestamps'] = opts[:'log_options_timestamps'] if !opts[:'log_options_timestamps'].nil? + query_params[:'logOptions.tailLines'] = opts[:'log_options_tail_lines'] if !opts[:'log_options_tail_lines'].nil? + query_params[:'logOptions.limitBytes'] = opts[:'log_options_limit_bytes'] if !opts[:'log_options_limit_bytes'].nil? + query_params[:'logOptions.insecureSkipTLSVerifyBackend'] = opts[:'log_options_insecure_skip_tls_verify_backend'] if !opts[:'log_options_insecure_skip_tls_verify_backend'].nil? + query_params[:'logOptions.stream'] = opts[:'log_options_stream'] if !opts[:'log_options_stream'].nil? + query_params[:'grep'] = opts[:'grep'] if !opts[:'grep'].nil? + query_params[:'selector'] = opts[:'selector'] if !opts[:'selector'].nil? + + # header parameters + header_params = opts[:header_params] || {} + # HTTP header 'Accept' (if needed) + header_params['Accept'] = @api_client.select_header_accept(['application/json']) + + # form parameters + form_params = opts[:form_params] || {} + + # http body (model) + post_body = opts[:body] + + return_type = opts[:return_type] || 'StreamResultOfIoArgoprojWorkflowV1alpha1LogEntry' + + auth_names = opts[:auth_names] || ['BearerToken'] + data, status_code, headers = @api_client.call_api(:GET, local_var_path, + :header_params => header_params, + :query_params => query_params, + :form_params => form_params, + :body => post_body, + :auth_names => auth_names, + :return_type => return_type) + + if @api_client.config.debugging + @api_client.config.logger.debug "API called: WorkflowServiceApi#workflow_service_workflow_logs\nData: #{data.inspect}\nStatus code: #{status_code}\nHeaders: #{headers}" + end + return data, status_code, headers + end + end +end diff --git a/lib/argo_workflows_api_client/api_client.rb b/lib/argo_workflows_api_client/api_client.rb new file mode 100644 index 00000000..ac8e234c --- /dev/null +++ b/lib/argo_workflows_api_client/api_client.rb @@ -0,0 +1,388 @@ +=begin +#Argo Workflows API + 
=begin
#Argo Workflows API

#Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. For more information, please see https://argo-workflows.readthedocs.io/en/latest/

OpenAPI spec version: VERSION

Generated by: https://github.com/swagger-api/swagger-codegen.git
Swagger Codegen version: 3.0.78
=end

require 'date'
require 'json'
require 'logger'
require 'tempfile'
require 'typhoeus'
require 'uri'

module ArgoWorkflowsApiClient
  # Low-level HTTP client used by the generated API classes. Wraps Typhoeus
  # (libcurl) and handles request construction, authentication, response
  # deserialization and chunked file downloads.
  class ApiClient
    # The Configuration object holding settings to be used in the API client.
    attr_accessor :config

    # Defines the headers to be used in HTTP requests of all API calls by default.
    #
    # @return [Hash]
    attr_accessor :default_headers

    # Initializes the ApiClient
    # @option config [Configuration] Configuration for initializing the object, default to Configuration.default
    def initialize(config = Configuration.default)
      @config = config
      @user_agent = "Swagger-Codegen/#{VERSION}/ruby"
      @default_headers = {
        'Content-Type' => 'application/json',
        'User-Agent' => @user_agent
      }
    end

    # Shared default instance, lazily created.
    def self.default
      @@default ||= ApiClient.new
    end

    # Call an API with given options.
    #
    # @return [Array<(Object, Integer, Hash)>] an array of 3 elements:
    #   the data deserialized from response body (could be nil), response status code and response headers.
    # @raise [ApiError] on timeout, libcurl failure or a non-success HTTP status
    def call_api(http_method, path, opts = {})
      request = build_request(http_method, path, opts)
      response = request.run

      if @config.debugging
        @config.logger.debug "HTTP response body ~BEGIN~\n#{response.body}\n~END~\n"
      end

      unless response.success?
        if response.timed_out?
          fail ApiError.new('Connection timed out')
        elsif response.code == 0
          # Errors from libcurl will be made visible here
          fail ApiError.new(:code => 0,
                            :message => response.return_message)
        else
          fail ApiError.new(:code => response.code,
                            :response_headers => response.headers,
                            :response_body => response.body),
               response.status_message
        end
      end

      if opts[:return_type]
        data = deserialize(response, opts[:return_type])
      else
        data = nil
      end
      return data, response.code, response.headers
    end

    # Builds the HTTP request
    #
    # @param [String] http_method HTTP method/verb (e.g. POST)
    # @param [String] path URL path (e.g. /account/new)
    # @option opts [Hash] :header_params Header parameters
    # @option opts [Hash] :query_params Query parameters
    # @option opts [Hash] :form_params Query parameters
    # @option opts [Object] :body HTTP body (JSON/XML)
    # @return [Typhoeus::Request] A Typhoeus Request
    def build_request(http_method, path, opts = {})
      url = build_request_url(path)
      http_method = http_method.to_sym.downcase

      header_params = @default_headers.merge(opts[:header_params] || {})
      query_params = opts[:query_params] || {}
      form_params = opts[:form_params] || {}

      update_params_for_auth! header_params, query_params, opts[:auth_names]

      # set ssl_verifyhosts option based on @config.verify_ssl_host (true/false)
      # (libcurl semantics: 2 = verify host name, 0 = no verification)
      _verify_ssl_host = @config.verify_ssl_host ? 2 : 0

      req_opts = {
        :method => http_method,
        :headers => header_params,
        :params => query_params,
        :params_encoding => @config.params_encoding,
        :timeout => @config.timeout,
        :ssl_verifypeer => @config.verify_ssl,
        :ssl_verifyhost => _verify_ssl_host,
        :sslcert => @config.cert_file,
        :sslkey => @config.key_file,
        :verbose => @config.debugging
      }

      # set custom cert, if provided
      req_opts[:cainfo] = @config.ssl_ca_cert if @config.ssl_ca_cert

      if [:post, :patch, :put, :delete].include?(http_method)
        req_body = build_request_body(header_params, form_params, opts[:body])
        req_opts.update :body => req_body
        if @config.debugging
          @config.logger.debug "HTTP request body param ~BEGIN~\n#{req_body}\n~END~\n"
        end
      end

      request = Typhoeus::Request.new(url, req_opts)
      download_file(request) if opts[:return_type] == 'File'
      request
    end

    # Builds the HTTP request body
    #
    # @param [Hash] header_params Header parameters
    # @param [Hash] form_params Query parameters
    # @param [Object] body HTTP body (JSON/XML)
    # @return [String] HTTP body data in the form of string
    def build_request_body(header_params, form_params, body)
      # http form
      if header_params['Content-Type'] == 'application/x-www-form-urlencoded' ||
         header_params['Content-Type'] == 'multipart/form-data'
        data = {}
        form_params.each do |key, value|
          case value
          when ::File, ::Array, nil
            # let typhoeus handle File, Array and nil parameters
            data[key] = value
          else
            data[key] = value.to_s
          end
        end
      elsif body
        data = body.is_a?(String) ? body : body.to_json
      else
        data = nil
      end
      data
    end

    # Check if the given MIME is a JSON MIME.
    # JSON MIME examples:
    #   application/json
    #   application/json; charset=UTF8
    #   APPLICATION/JSON
    #   */*
    # @param [String] mime MIME
    # @return [Boolean] True if the MIME is application/json
    def json_mime?(mime)
      (mime == '*/*') || !(mime =~ /Application\/.*json(?!p)(;.*)?/i).nil?
    end

    # Deserialize the response to the given return type.
    #
    # @param [Response] response HTTP response
    # @param [String] return_type some examples: "User", "Array<User>", "Hash<String, Integer>"
    def deserialize(response, return_type)
      body = response.body

      # handle file downloading - return the File instance processed in request callbacks
      # note that response body is empty when the file is written in chunks in request on_body callback
      return @tempfile if return_type == 'File'

      return nil if body.nil? || body.empty?

      # return response body directly for String return type
      return body if return_type == 'String'

      # ensuring a default content type
      content_type = response.headers['Content-Type'] || 'application/json'

      fail "Content-Type is not supported: #{content_type}" unless json_mime?(content_type)

      begin
        data = JSON.parse("[#{body}]", :symbolize_names => true)[0]
      rescue JSON::ParserError => e
        if %w(String Date DateTime).include?(return_type)
          data = body
        else
          raise e
        end
      end

      convert_to_type data, return_type
    end

    # Convert data to the given return type.
    # @param [Object] data Data to be converted
    # @param [String] return_type Return type
    # @return [Mixed] Data in a particular type
    def convert_to_type(data, return_type)
      return nil if data.nil?
      case return_type
      when 'String'
        data.to_s
      when 'Integer'
        data.to_i
      when 'Float'
        data.to_f
      when 'Boolean'
        data == true
      when 'DateTime'
        # parse date time (expecting ISO 8601 format)
        DateTime.parse data
      when 'Date'
        # parse date time (expecting ISO 8601 format)
        Date.parse data
      when 'Object'
        # generic object (usually a Hash), return directly
        data
      when /\AArray<(.+)>\z/
        # e.g. Array<Pet>
        sub_type = $1
        data.map { |item| convert_to_type(item, sub_type) }
      when /\AHash<String, (.+)>\z/
        # e.g. Hash<String, Integer>
        # NOTE(review): the generated source had lost the generic capture
        # group here ("/\AHash\\z/"), which made $1 nil and crashed
        # const_get below for any Hash return type; restored per the
        # swagger-codegen Ruby template.
        sub_type = $1
        {}.tap do |hash|
          data.each { |k, v| hash[k] = convert_to_type(v, sub_type) }
        end
      else
        # models, e.g. Pet
        ArgoWorkflowsApiClient.const_get(return_type).build_from_hash(data)
      end
    end

    # Save response body into a file in (the defined) temporary folder, using the filename
    # from the "Content-Disposition" header if provided, otherwise a random filename.
    # The response body is written to the file in chunks in order to handle files which
    # size is larger than maximum Ruby String or even larger than the maximum memory a Ruby
    # process can use.
    #
    # @see Configuration#temp_folder_path
    def download_file(request)
      tempfile = nil
      encoding = nil
      request.on_headers do |response|
        content_disposition = response.headers['Content-Disposition']
        if content_disposition && content_disposition =~ /filename=/i
          filename = content_disposition[/filename=['"]?([^'"\s]+)['"]?/, 1]
          prefix = sanitize_filename(filename)
        else
          prefix = 'download-'
        end
        prefix = prefix + '-' unless prefix.end_with?('-')
        encoding = response.body.encoding
        tempfile = Tempfile.open(prefix, @config.temp_folder_path, encoding: encoding)
        @tempfile = tempfile
      end
      request.on_body do |chunk|
        chunk.force_encoding(encoding)
        tempfile.write(chunk)
      end
      request.on_complete do |response|
        if tempfile
          tempfile.close
          @config.logger.info "Temp file written to #{tempfile.path}, please copy the file to a proper folder "\
                              "with e.g. `FileUtils.cp(tempfile.path, '/new/file/path')` otherwise the temp file "\
                              "will be deleted automatically with GC. It's also recommended to delete the temp file "\
                              "explicitly with `tempfile.delete`"
        end
      end
    end

    # Sanitize filename by removing path.
    # e.g. ../../sun.gif becomes sun.gif
    #
    # @param [String] filename the filename to be sanitized
    # @return [String] the sanitized filename
    def sanitize_filename(filename)
      filename.gsub(/.*[\/\\]/, '')
    end

    def build_request_url(path)
      # Add leading and trailing slashes to path
      path = "/#{path}".gsub(/\/+/, '/')
      @config.base_url + path
    end

    # Update header and query params based on authentication settings.
    #
    # @param [Hash] header_params Header parameters
    # @param [Hash] query_params Query parameters
    # @param [String] auth_names Authentication scheme name
    def update_params_for_auth!(header_params, query_params, auth_names)
      Array(auth_names).each do |auth_name|
        auth_setting = @config.auth_settings[auth_name]
        next unless auth_setting
        case auth_setting[:in]
        when 'header' then header_params[auth_setting[:key]] = auth_setting[:value]
        when 'query'  then query_params[auth_setting[:key]] = auth_setting[:value]
        else fail ArgumentError, 'Authentication token must be in `query` or `header`'
        end
      end
    end

    # Sets user agent in HTTP header
    #
    # @param [String] user_agent User agent (e.g. swagger-codegen/ruby/1.0.0)
    def user_agent=(user_agent)
      @user_agent = user_agent
      @default_headers['User-Agent'] = @user_agent
    end

    # Return Accept header based on an array of accepts provided.
    # @param [Array] accepts array for Accept
    # @return [String] the Accept header (e.g. application/json)
    def select_header_accept(accepts)
      return nil if accepts.nil? || accepts.empty?
      # use JSON when present, otherwise use all of the provided
      json_accept = accepts.find { |s| json_mime?(s) }
      json_accept || accepts.join(',')
    end

    # Return Content-Type header based on an array of content types provided.
    # @param [Array] content_types array for Content-Type
    # @return [String] the Content-Type header (e.g. application/json)
    def select_header_content_type(content_types)
      # use application/json by default
      return 'application/json' if content_types.nil? || content_types.empty?
      # use JSON when present, otherwise use the first one
      json_content_type = content_types.find { |s| json_mime?(s) }
      json_content_type || content_types.first
    end

    # Convert object (array, hash, object, etc) to JSON string.
    # @param [Object] model object to be converted into JSON string
    # @return [String] JSON string representation of the object
    def object_to_http_body(model)
      return model if model.nil? || model.is_a?(String)
      local_body = nil
      if model.is_a?(Array)
        local_body = model.map { |m| object_to_hash(m) }
      else
        local_body = object_to_hash(model)
      end
      local_body.to_json
    end

    # Convert object(non-array) to hash.
    # @param [Object] obj object to be converted into JSON string
    # @return [String] JSON string representation of the object
    def object_to_hash(obj)
      if obj.respond_to?(:to_hash)
        obj.to_hash
      else
        obj
      end
    end

    # Build parameter value according to the given collection format.
    # @param [String] collection_format one of :csv, :ssv, :tsv, :pipes and :multi
    def build_collection_param(param, collection_format)
      case collection_format
      when :csv
        param.join(',')
      when :ssv
        param.join(' ')
      when :tsv
        param.join("\t")
      when :pipes
        param.join('|')
      when :multi
        # return the array directly as typhoeus will handle it as expected
        param
      else
        fail "unknown collection format: #{collection_format.inspect}"
      end
    end
  end
end
module ArgoWorkflowsApiClient
  # Error raised for transport failures and non-success API responses.
  # Carries the HTTP status code, response headers and response body when
  # they are available.
  class ApiError < StandardError
    attr_reader :code, :response_headers, :response_body

    # Usage examples:
    #   ApiError.new
    #   ApiError.new("message")
    #   ApiError.new(:code => 500, :response_headers => {}, :response_body => "")
    #   ApiError.new(:code => 404, :message => "Not Found")
    def initialize(arg = nil)
      if arg.is_a? Hash
        if arg.key?(:message) || arg.key?('message')
          super(arg[:message] || arg['message'])
        else
          super arg
        end

        # Expose every supplied attribute (:code, :response_headers, ...)
        # through the matching instance variable / reader.
        arg.each do |k, v|
          instance_variable_set "@#{k}", v
        end
      else
        super arg
        # Remember a plain-string (or nil) message: without this, #message
        # below would discard the string passed to `ApiError.new("boom")`
        # and return the generic default instead.
        @message = arg
      end
    end

    # Override to_s to display a friendly error message
    def to_s
      message
    end

    # Builds a human-readable description from the message plus any HTTP
    # details that were captured.
    def message
      if @message.nil?
        msg = "Error message: the server returns an error"
      else
        msg = @message
      end

      msg += "\nHTTP status code: #{code}" if code
      msg += "\nResponse headers: #{response_headers}" if response_headers
      msg += "\nResponse body: #{response_body}" if response_body

      msg
    end
  end
end
module ArgoWorkflowsApiClient
  # Holds all client-side settings for the Argo Workflows API client:
  # connection target, credentials, TLS options, timeouts and debugging.
  class Configuration
    # Defines url scheme
    attr_accessor :scheme

    # Defines url host
    attr_accessor :host

    # Defines url base path
    attr_accessor :base_path

    # Defines API keys used with API Key authentications.
    #
    # @return [Hash] key: parameter name, value: parameter value (API key)
    #
    # @example parameter name is "api_key", API key is "xxx" (e.g. "api_key=xxx" in query string)
    #   config.api_key['api_key'] = 'xxx'
    attr_accessor :api_key

    # Defines API key prefixes used with API Key authentications.
    #
    # @return [Hash] key: parameter name, value: API key prefix
    #
    # @example parameter name is "Authorization", API key prefix is "Token" (e.g. "Authorization: Token xxx" in headers)
    #   config.api_key_prefix['api_key'] = 'Token'
    attr_accessor :api_key_prefix

    # Defines the username used with HTTP basic authentication.
    #
    # @return [String]
    attr_accessor :username

    # Defines the password used with HTTP basic authentication.
    #
    # @return [String]
    attr_accessor :password

    # Defines the access token (Bearer) used with OAuth2.
    attr_accessor :access_token

    # Set this to enable/disable debugging. When enabled (set to true), HTTP request/response
    # details will be logged with `logger.debug` (see the `logger` attribute).
    # Default to false.
    #
    # @return [true, false]
    attr_accessor :debugging

    # Defines the logger used for debugging.
    # Default to `Rails.logger` (when in Rails) or logging to STDOUT.
    #
    # @return [#debug]
    attr_accessor :logger

    # Defines the temporary folder to store downloaded files
    # (for API endpoints that have file response).
    # Default to use `Tempfile`.
    #
    # @return [String]
    attr_accessor :temp_folder_path

    # The time limit for HTTP request in seconds.
    # Default to 0 (never times out).
    attr_accessor :timeout

    # Set this to false to skip client side validation in the operation.
    # Default to true.
    # @return [true, false]
    attr_accessor :client_side_validation

    ### TLS/SSL setting
    # Set this to false to skip verifying SSL certificate when calling API from https server.
    # Default to true.
    #
    # @note Do NOT set it to false in production code, otherwise you would face multiple types of cryptographic attacks.
    #
    # @return [true, false]
    attr_accessor :verify_ssl

    ### TLS/SSL setting
    # Set this to false to skip verifying SSL host name
    # Default to true.
    #
    # @note Do NOT set it to false in production code, otherwise you would face multiple types of cryptographic attacks.
    #
    # @return [true, false]
    attr_accessor :verify_ssl_host

    ### TLS/SSL setting
    # Set this to customize the certificate file to verify the peer.
    #
    # @return [String] the path to the certificate file
    #
    # @see The `cainfo` option of Typhoeus, `--cacert` option of libcurl. Related source code:
    # https://github.com/typhoeus/typhoeus/blob/master/lib/typhoeus/easy_factory.rb#L145
    attr_accessor :ssl_ca_cert

    ### TLS/SSL setting
    # Client certificate file (for client certificate)
    attr_accessor :cert_file

    ### TLS/SSL setting
    # Client private key file (for client certificate)
    attr_accessor :key_file

    # Set this to customize parameters encoding of array parameter with multi collectionFormat.
    # Default to nil.
    #
    # @see The params_encoding option of Ethon. Related source code:
    # https://github.com/typhoeus/ethon/blob/master/lib/ethon/easy/queryable.rb#L96
    attr_accessor :params_encoding

    attr_accessor :inject_format

    attr_accessor :force_ending_format

    def initialize
      @scheme = 'http'
      # Default server from the Argo Workflows spec (http://localhost:2746).
      # The port belongs in `host`, and `base_path` must be a path — never a
      # full URL — otherwise #base_url would build a broken address like
      # "http://localhost/http:/localhost:2746".
      @host = 'localhost:2746'
      @base_path = ''
      @api_key = {}
      @api_key_prefix = {}
      @timeout = 0
      @client_side_validation = true
      @verify_ssl = true
      @verify_ssl_host = true
      @params_encoding = nil
      @cert_file = nil
      @key_file = nil
      @debugging = false
      @inject_format = false
      @force_ending_format = false
      @logger = defined?(Rails) ? Rails.logger : Logger.new(STDOUT)

      yield(self) if block_given?
    end

    # The default Configuration object.
    def self.default
      @@default ||= Configuration.new
    end

    def configure
      yield(self) if block_given?
    end

    def scheme=(scheme)
      # remove :// from scheme
      @scheme = scheme.sub(/:\/\//, '')
    end

    def host=(host)
      # remove http(s):// and anything after a slash
      @host = host.sub(/https?:\/\//, '').split('/').first
    end

    def base_path=(base_path)
      # Add leading and trailing slashes to base_path
      @base_path = "/#{base_path}".gsub(/\/+/, '/')
      @base_path = '' if @base_path == '/'
    end

    # Full server URL, e.g. "http://localhost:2746".
    def base_url
      "#{scheme}://#{[host, base_path].join('/').gsub(/\/+/, '/')}".sub(/\/+\z/, '')
    end

    # Gets API key (with prefix if set).
    # @param [String] param_name the parameter name of API key auth
    def api_key_with_prefix(param_name)
      if @api_key_prefix[param_name]
        "#{@api_key_prefix[param_name]} #{@api_key[param_name]}"
      else
        @api_key[param_name]
      end
    end

    # Gets Basic Auth token string
    def basic_auth_token
      'Basic ' + ["#{username}:#{password}"].pack('m').delete("\r\n")
    end

    # Returns Auth Settings hash for api client.
    def auth_settings
      {
        'BearerToken' =>
          {
            type: 'api_key',
            in: 'header',
            key: 'Authorization',
            value: api_key_with_prefix('Authorization')
          },
      }
    end
  end
end
#!/usr/bin/env bash
# Regenerates the vendored Argo Workflows Ruby client from the upstream
# swagger spec, using the swagger-codegen CLI docker image. Override the
# spec URL / image via ARGO_WORKFLOWS_SWAGGER_URL / SWAGGER_CODEGEN_IMAGE.

set -euo pipefail

ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
SPEC_URL="${ARGO_WORKFLOWS_SWAGGER_URL:-https://raw.githubusercontent.com/argoproj/argo-workflows/main/api/openapi-spec/swagger.json}"
IMAGE="${SWAGGER_CODEGEN_IMAGE:-swaggerapi/swagger-codegen-cli-v3}"
MODULE_NAME="ArgoWorkflowsApiClient"
GEM_NAME="argo_workflows_api_client"

# Scratch directory, removed on exit regardless of success.
TMP_DIR="$(mktemp -d)"
trap 'rm -rf "$TMP_DIR"' EXIT

# Fail fast when a required tool is absent.
for tool in curl docker; do
  if ! command -v "$tool" >/dev/null 2>&1; then
    echo "Missing required command: $tool" >&2
    exit 1
  fi
done

echo "Fetching Argo Workflows Swagger spec from $SPEC_URL"
curl -fsSL "$SPEC_URL" -o "$TMP_DIR/swagger.json"

echo "Generating Ruby client with $IMAGE"
docker run --rm \
  -v "$TMP_DIR:/local" \
  "$IMAGE" generate \
  -i /local/swagger.json \
  -l ruby \
  -o /local/out \
  -D "moduleName=$MODULE_NAME,gemName=$GEM_NAME" >/dev/null

DEST_DIR="$ROOT_DIR/lib/argo_workflows_api_client"
API_DIR="$DEST_DIR/api"
mkdir -p "$API_DIR"

# Copy only the files we vendor; everything else in the generated tree
# (models, specs, gemspec) is intentionally discarded.
for file in api_client.rb api_error.rb configuration.rb version.rb; do
  cp "$TMP_DIR/out/lib/$GEM_NAME/$file" "$DEST_DIR/$file"
done
cp "$TMP_DIR/out/lib/$GEM_NAME/api/workflow_service_api.rb" "$API_DIR/workflow_service_api.rb"

# Rewrite the entry-point file so it only requires the vendored pieces.
cat > "$ROOT_DIR/lib/argo_workflows_api_client.rb" <<'RUBY'
require 'argo_workflows_api_client/api_client'
require 'argo_workflows_api_client/api_error'
require 'argo_workflows_api_client/version'
require 'argo_workflows_api_client/configuration'
require 'argo_workflows_api_client/api/workflow_service_api'
RUBY

echo "Updated vendored Argo Workflows client in $DEST_DIR"
rubocop:todo RSpec/NestedGroups context 'in progress' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups @@ -211,7 +215,8 @@ it 'returns `in progress`' do expect { perform_request }.not_to change(EnvelopeDownload, :count) expect_status(:ok) - expect_json_sizes(2) + expect_json_sizes(3) + expect_json('last_published_at', nil) expect_json('started_at', envelope_download.started_at.as_json) expect_json('status', 'in_progress') end @@ -225,14 +230,17 @@ let(:internal_error_message) { Faker::Lorem.sentence } let(:status) { :finished } let(:url) { Faker::Internet.url } + let(:zip_files) { [url] } it 'returns `failed`' do expect { perform_request }.not_to change(EnvelopeDownload, :count) expect_status(:ok) - expect_json_sizes(3) + expect_json_sizes(5) + expect_json('last_published_at', nil) expect_json('finished_at', envelope_download.finished_at.as_json) expect_json('status', 'failed') expect_json('url', url) + expect_json('zip_files', zip_files) end end # rubocop:enable RSpec/MultipleMemoizedHelpers @@ -244,14 +252,17 @@ let(:finished_at) { Time.current } let(:status) { :finished } let(:url) { Faker::Internet.url } + let(:zip_files) { [url, "#{url}/second.zip"] } it 'returns `finished` and URL' do expect { perform_request }.not_to change(EnvelopeDownload, :count) expect_status(:ok) - expect_json_sizes(3) + expect_json_sizes(5) + expect_json('last_published_at', nil) expect_json('finished_at', envelope_download.finished_at.as_json) expect_json('status', 'finished') expect_json('url', url) + expect_json('zip_files', zip_files) end end # rubocop:enable RSpec/MultipleMemoizedHelpers @@ -279,6 +290,14 @@ post '/envelopes/download', nil, 'Authorization' => "Token #{auth_token}" end + before do + PaperTrail.enabled = true + end + + after do + PaperTrail.enabled = false + end + context 'with invalid token' do let(:auth_token) { 'invalid token' } @@ -298,6 +317,12 @@ # rubocop:todo RSpec/MultipleExpectations it 'creates new pending download 
and enqueues job' do # rubocop:todo RSpec/ExampleLength # rubocop:enable RSpec/MultipleExpectations + published_at = now - 5.minutes + + travel_to published_at do + create(:envelope, :from_cer, envelope_community:) + end + travel_to now do expect { perform_request }.to change(EnvelopeDownload, :count).by(1) end @@ -306,9 +331,11 @@ envelope_download = EnvelopeDownload.last expect(envelope_download.envelope_community).to eq(envelope_community) + expect(envelope_download.last_published_at).to eq(published_at) expect(envelope_download.status).to eq('pending') - expect_json_sizes(2) + expect_json_sizes(3) + expect_json('last_published_at', published_at.as_json) expect_json('enqueued_at', now.as_json) expect_json('status', 'pending') @@ -325,8 +352,31 @@ let!(:envelope_download) do create(:envelope_download, :finished, envelope_community:) end + let(:published_at) { now - 10.minutes } + + before do + travel_to published_at do + create(:envelope, :from_cer, envelope_community:) + end + end + + it 'returns the existing download when no newer publish event exists' do + envelope_download.update!(last_published_at: published_at) + + expect { perform_request }.to not_enqueue_job(DownloadEnvelopesJob) + + expect_status(:ok) + expect(envelope_download.reload.status).to eq('finished') + expect(envelope_download.last_published_at).to eq(published_at) + + expect_json('finished_at', envelope_download.finished_at.as_json) + expect_json('status', 'finished') + end + + it 'enqueues job for existing download when there is a newer publish event' do + previous_publish_time = published_at - 5.minutes + envelope_download.update!(last_published_at: previous_publish_time) - it 'enqueues job for existing download' do travel_to now do expect { perform_request }.to not_change(EnvelopeDownload, :count) .and enqueue_job(DownloadEnvelopesJob).with(envelope_download.id) @@ -334,11 +384,83 @@ expect_status(:created) expect(envelope_download.reload.status).to eq('pending') + 
expect(envelope_download.last_published_at).to eq(published_at) - expect_json_sizes(2) + expect_json_sizes(3) + expect_json('last_published_at', published_at.as_json) expect_json('enqueued_at', now.as_json) expect_json('status', 'pending') end + + it 'clears previous failure fields when retrying a failed download' do + envelope_download.update!( + argo_workflow_name: 'old-workflow', + argo_workflow_namespace: 'credreg-staging', + finished_at: 5.minutes.ago.change(usec: 0), + internal_error_backtrace: ['boom'], + internal_error_message: 'zip task failed', + url: 'https://downloads.example/old.zip', + zip_files: ['old.zip'] + ) + envelope_download.update!(last_published_at: published_at - 5.minutes) + + travel_to now do + expect { perform_request }.to enqueue_job(DownloadEnvelopesJob).with(envelope_download.id) + end + + expect_status(:created) + + envelope_download.reload + expect(envelope_download.status).to eq('pending') + expect(envelope_download.enqueued_at).to eq(now) + expect(envelope_download.finished_at).to be_nil + expect(envelope_download.internal_error_message).to be_nil + expect(envelope_download.internal_error_backtrace).to eq([]) + expect(envelope_download.last_published_at).to eq(published_at) + expect(envelope_download.url).to be_nil + expect(envelope_download.zip_files).to eq([]) + expect(envelope_download.argo_workflow_name).to be_nil + expect(envelope_download.argo_workflow_namespace).to be_nil + + expect_json_sizes(3) + expect_json('last_published_at', published_at.as_json) + expect_json('enqueued_at', now.as_json) + expect_json('status', 'pending') + end + + it 'does not enqueue a duplicate job when the download is already pending' do + envelope_download.update!( + enqueued_at: now, + last_published_at: published_at - 5.minutes, + status: :pending + ) + + expect { perform_request }.to not_enqueue_job(DownloadEnvelopesJob) + + expect_status(:ok) + expect(envelope_download.reload.status).to eq('pending') + expect_json_sizes(3) + 
expect_json('last_published_at', envelope_download.last_published_at.as_json) + expect_json('enqueued_at', now.as_json) + expect_json('status', 'pending') + end + + it 'does not enqueue a duplicate job when the download is already in progress' do + envelope_download.update!( + last_published_at: published_at - 5.minutes, + started_at: now, + status: :in_progress + ) + + expect { perform_request }.to not_enqueue_job(DownloadEnvelopesJob) + + expect_status(:ok) + expect(envelope_download.reload.status).to eq('in_progress') + expect_json_sizes(3) + expect_json('last_published_at', envelope_download.last_published_at.as_json) + expect_json('started_at', now.as_json) + expect_json('status', 'in_progress') + end end # rubocop:enable RSpec/MultipleMemoizedHelpers end diff --git a/spec/factories/envelope_downloads.rb b/spec/factories/envelope_downloads.rb index caf0a418..9971381d 100644 --- a/spec/factories/envelope_downloads.rb +++ b/spec/factories/envelope_downloads.rb @@ -1,6 +1,8 @@ FactoryBot.define do factory :envelope_download do enqueued_at { Time.current.change(usec: 0) } + last_published_at { nil } + zip_files { [] } # rubocop:todo FactoryBot/FactoryAssociationWithStrategy envelope_community { create(:envelope_community, :with_random_name) } # rubocop:enable FactoryBot/FactoryAssociationWithStrategy diff --git a/spec/jobs/download_envelopes_job_spec.rb b/spec/jobs/download_envelopes_job_spec.rb index 726c4bd1..5465e912 100644 --- a/spec/jobs/download_envelopes_job_spec.rb +++ b/spec/jobs/download_envelopes_job_spec.rb @@ -5,8 +5,8 @@ describe '#perform' do context 'without error' do - it 'calls DownloadEnvelopes' do - allow(DownloadEnvelopes).to receive(:call).with(envelope_download:) + it 'submits the Argo workflow' do + allow(SubmitEnvelopeDownloadWorkflow).to receive(:call).with(envelope_download:) described_class.new.perform(envelope_download.id) end end @@ -18,7 +18,7 @@ allow(Airbrake).to receive(:notify) .with(error, envelope_download_id: 
envelope_download.id) - allow(DownloadEnvelopes).to receive(:call) + allow(SubmitEnvelopeDownloadWorkflow).to receive(:call) .with(envelope_download:) .and_raise(error) diff --git a/spec/services/argo_workflows_client_spec.rb b/spec/services/argo_workflows_client_spec.rb new file mode 100644 index 00000000..ad421551 --- /dev/null +++ b/spec/services/argo_workflows_client_spec.rb @@ -0,0 +1,48 @@ +require 'spec_helper' + +RSpec.describe ArgoWorkflowsClient do + let(:api_client) { instance_double(ArgoWorkflowsApiClient::ApiClient) } + let(:workflow_service_api) { instance_double(ArgoWorkflowsApiClient::WorkflowServiceApi) } + let(:configuration) { instance_double(ArgoWorkflowsApiClient::Configuration) } + let(:workflow) { { metadata: { name: 'ce-registry-download-abc123' } } } + + before do + allow(ENV).to receive(:fetch).and_call_original + allow(ENV).to receive(:fetch).with('ARGO_WORKFLOWS_NAMESPACE').and_return('credreg-staging') + allow(ArgoWorkflowsApiClient::ApiClient).to receive(:new).with(configuration).and_return(api_client) + allow(ArgoWorkflowsApiClient::WorkflowServiceApi) + .to receive(:new).with(api_client).and_return(workflow_service_api) + end + + describe '#submit_workflow' do + it 'passes generateName and parameters to the Argo client' do + client = described_class.new(configuration:) + + allow(workflow_service_api).to receive(:workflow_service_submit_workflow) + .with( + { + namespace: 'credreg-staging', + resourceKind: 'WorkflowTemplate', + resourceName: 's3-graphs-zip', + submitOptions: { + generateName: 'ce-registry-download-', + parameters: ['source-prefix=ce_registry', 'destination-bucket=downloads-bucket'] + } + }, + 'credreg-staging', + return_type: 'Object' + ).and_return(workflow) + + result = client.submit_workflow( + template_name: 's3-graphs-zip', + generate_name: 'ce-registry-download-', + parameters: { + 'source-prefix' => 'ce_registry', + 'destination-bucket' => 'downloads-bucket' + } + ) + + expect(result).to eq(workflow) + end + 
end +end diff --git a/spec/services/download_envelopes_spec.rb b/spec/services/download_envelopes_spec.rb index 5ec381d6..0f5a62ae 100644 --- a/spec/services/download_envelopes_spec.rb +++ b/spec/services/download_envelopes_spec.rb @@ -1,3 +1,5 @@ +require 'download_envelopes' + RSpec.describe DownloadEnvelopes do # rubocop:todo RSpec/MultipleMemoizedHelpers let(:bucket) { double('bucket') } # rubocop:todo RSpec/VerifiedDoubles let(:bucket_name) { 'envelope-downloads-bucket-test' } diff --git a/spec/services/submit_envelope_download_workflow_spec.rb b/spec/services/submit_envelope_download_workflow_spec.rb new file mode 100644 index 00000000..aa6a3a53 --- /dev/null +++ b/spec/services/submit_envelope_download_workflow_spec.rb @@ -0,0 +1,71 @@ +require 'spec_helper' + +RSpec.describe SubmitEnvelopeDownloadWorkflow do + let(:client) { instance_double(ArgoWorkflowsClient, namespace: 'credreg-staging') } + let(:community) { EnvelopeCommunity.find_or_create_by!(name: 'ce_registry') } + let(:envelope_download) { create(:envelope_download, envelope_community: community) } + let(:workflow) { { metadata: { name: 'ce-registry-download-abc123' } } } + let(:now) { Time.zone.parse('2026-03-06 12:00:00 UTC') } + + before do + allow(ArgoWorkflowsClient).to receive(:new).and_return(client) + allow(ENV).to receive(:fetch).and_call_original + allow(ENV).to receive(:fetch).with('ARGO_WORKFLOWS_TEMPLATE_NAME').and_return('s3-graphs-zip') + allow(ENV).to receive(:fetch).with('ARGO_WORKFLOWS_TASK_IMAGE').and_return('registry:s3-graphs-zip') + allow(ENV).to receive(:fetch).with('ARGO_WORKFLOWS_BATCH_SIZE', '25000').and_return('25000') + allow(ENV).to receive(:fetch) + .with('ARGO_WORKFLOWS_MAX_UNCOMPRESSED_ZIP_SIZE_BYTES', '209715200') + .and_return('209715200') + allow(ENV).to receive(:fetch).with('ARGO_WORKFLOWS_MAX_WORKERS', '4').and_return('4') + allow(ENV).to receive(:fetch).with('AWS_REGION').and_return('us-east-1') + allow(ENV).to 
receive(:fetch).with('ENVELOPE_DOWNLOADS_BUCKET').and_return('downloads-bucket') + allow(ENV).to receive(:fetch).with('ENVELOPE_GRAPHS_BUCKET').and_return('graphs-bucket') + end + + it 'submits the workflow and marks the download in progress' do + allow(client).to receive(:submit_workflow) + .with( + template_name: 's3-graphs-zip', + generate_name: 'ce-registry-download-', + parameters: { + 'batch-size' => '25000', + 'aws-region' => 'us-east-1', + 'destination-bucket' => 'downloads-bucket', + 'destination-prefix' => "ce_registry/downloads/#{envelope_download.id}", + 'environment' => MR.env, + 'max-uncompressed-zip-size-bytes' => '209715200', + 'max-workers' => '4', + 'source-bucket' => 'graphs-bucket', + 'source-prefix' => 'ce_registry', + 'task-image' => 'registry:s3-graphs-zip' + } + ).and_return(workflow) + + travel_to now do + described_class.call(envelope_download:) + end + + envelope_download.reload + expect(envelope_download.status).to eq('in_progress') + expect(envelope_download.started_at).to eq(now) + expect(envelope_download.finished_at).to be_nil + expect(envelope_download.internal_error_message).to be_nil + expect(envelope_download.argo_workflow_name).to eq('ce-registry-download-abc123') + expect(envelope_download.argo_workflow_namespace).to eq('credreg-staging') + expect(envelope_download.zip_files).to eq([]) + end + + it 'does not submit a second workflow when one is already in progress' do + envelope_download.update!( + argo_workflow_name: 'existing-workflow', + argo_workflow_namespace: 'credreg-staging', + status: :in_progress + ) + + expect(client).not_to receive(:submit_workflow) + + described_class.call(envelope_download:) + + expect(envelope_download.reload.argo_workflow_name).to eq('existing-workflow') + end +end diff --git a/spec/services/sync_envelope_download_workflow_status_spec.rb b/spec/services/sync_envelope_download_workflow_status_spec.rb new file mode 100644 index 00000000..63fd51dc --- /dev/null +++ 
b/spec/services/sync_envelope_download_workflow_status_spec.rb @@ -0,0 +1,163 @@ +require 'spec_helper' + +RSpec.describe SyncEnvelopeDownloadWorkflowStatus do + let(:api_error_class) do + Class.new(StandardError) do + attr_reader :code + + def initialize(code, message) + @code = code + super(message) + end + end + end + let(:client) { instance_double(ArgoWorkflowsClient) } + let(:community) { EnvelopeCommunity.find_or_create_by!(name: 'ce_registry') } + let(:envelope_download) do + create( + :envelope_download, + :in_progress, + envelope_community: community, + argo_workflow_name: 'ce-registry-download-abc123', + argo_workflow_namespace: 'credreg-staging' + ) + end + let(:s3_client) { instance_double(Aws::S3::Client) } + let(:s3_resource) { instance_double(Aws::S3::Resource) } + let(:bucket) { instance_double(Aws::S3::Bucket) } + let(:object) { instance_double(Aws::S3::Object, public_url: 'https://downloads.example/batch-00001.zip') } + + before do + stub_const('ArgoWorkflowsApiClient::ApiError', api_error_class) + allow(ArgoWorkflowsClient).to receive(:new).and_return(client) + allow(ENV).to receive(:fetch).and_call_original + allow(ENV).to receive(:fetch).with('AWS_REGION').and_return('us-east-1') + allow(ENV).to receive(:fetch).with('ENVELOPE_DOWNLOADS_BUCKET').and_return('downloads-bucket') + end + + context 'when the workflow succeeds' do + before do + allow(client).to receive(:get_workflow).with(name: 'ce-registry-download-abc123').and_return( + metadata: { + name: 'ce-registry-download-abc123' + }, + status: { + phase: 'Succeeded', + finishedAt: '2026-03-06T12:10:00Z', + nodes: { + :'ce-registry-download-abc123' => { + outputs: { + parameters: [ + { + name: 'zip-manifest', + value: { + batch_count: 2, + destination_bucket: 'downloads-bucket', + destination_prefix: "ce_registry/downloads/#{envelope_download.id}", + total_files: 12, + total_input_bytes: 123_456, + zip_files: [ + "ce_registry/downloads/#{envelope_download.id}/batch-00001.zip", + 
"ce_registry/downloads/#{envelope_download.id}/batch-00002.zip", + ], + zip_size_bytes: 45_678, + }.to_json + }, + ] + } + } + } + } + ) + + allow(Aws::S3::Client).to receive(:new).with(region: 'us-east-1').and_return(s3_client) + allow(Aws::S3::Resource).to receive(:new).with(region: 'us-east-1').and_return(s3_resource) + allow(s3_resource).to receive(:bucket).with('downloads-bucket').and_return(bucket) + allow(bucket).to receive(:object) + .with("ce_registry/downloads/#{envelope_download.id}/batch-00001.zip") + .and_return(object) + end + + it 'stores the download URL and marks the download finished' do + described_class.call(envelope_download:) + + envelope_download.reload + expect(envelope_download.status).to eq('finished') + expect(envelope_download.url).to eq('https://downloads.example/batch-00001.zip') + expect(envelope_download.zip_files).to eq( + [ + "ce_registry/downloads/#{envelope_download.id}/batch-00001.zip", + "ce_registry/downloads/#{envelope_download.id}/batch-00002.zip", + ] + ) + expect(envelope_download.internal_error_message).to be_nil + expect(envelope_download.finished_at).to eq(Time.zone.parse('2026-03-06T12:10:00Z')) + end + end + + context 'when the workflow fails' do + before do + allow(client).to receive(:get_workflow).with(name: 'ce-registry-download-abc123').and_return( + status: { + phase: 'Failed', + finishedAt: '2026-03-06T12:10:00Z', + message: 'zip task failed' + } + ) + end + + it 'marks the download failed' do + described_class.call(envelope_download:) + + envelope_download.reload + expect(envelope_download.status).to eq('finished') + expect(envelope_download.url).to be_nil + expect(envelope_download.zip_files).to eq([]) + expect(envelope_download.internal_error_message).to eq('zip task failed') + expect(envelope_download.finished_at).to eq(Time.zone.parse('2026-03-06T12:10:00Z')) + end + end + + context 'when the workflow lookup returns not found' do + let(:api_error) { ArgoWorkflowsApiClient::ApiError.new(404, 'Not Found') } + 
+ before do + allow(client).to receive(:get_workflow) + .with(name: 'ce-registry-download-abc123') + .and_raise(api_error) + allow(MR.logger).to receive(:warn) + end + + it 'marks the download failed' do + described_class.call(envelope_download:) + + envelope_download.reload + expect(envelope_download.status).to eq('finished') + expect(envelope_download.argo_workflow_name).to be_nil + expect(envelope_download.argo_workflow_namespace).to be_nil + expect(envelope_download.url).to be_nil + expect(envelope_download.zip_files).to eq([]) + expect(envelope_download.internal_error_message).to eq('Argo workflow not found: Not Found') + expect(envelope_download.finished_at).to be_present + end + end + + context 'when the workflow lookup returns a transient API error' do + let(:api_error) { ArgoWorkflowsApiClient::ApiError.new(500, 'Internal Server Error') } + + before do + allow(client).to receive(:get_workflow) + .with(name: 'ce-registry-download-abc123') + .and_raise(api_error) + allow(MR.logger).to receive(:warn) + end + + it 'leaves the download in progress' do + expect { described_class.call(envelope_download:) } + .not_to change { envelope_download.reload.status } + + expect(envelope_download.argo_workflow_name).to eq('ce-registry-download-abc123') + expect(envelope_download.argo_workflow_namespace).to eq('credreg-staging') + end + end +end